Magellan Linux

Contents of /trunk/mkinitrd-magellan/klibc/usr/klibc/malloc.c



Revision 815 - Fri Apr 24 18:32:46 2009 UTC by niro
File MIME type: text/plain
File size: 7063 bytes
-updated to klibc-1.5.15
/*
 * malloc.c
 *
 * Very simple linked-list based malloc()/free().
 */

#include <stdlib.h>
#include <unistd.h>
#include <sys/mman.h>
#include <assert.h>
#include "malloc.h"

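/* For reference: a sketch of the structures this file expects from
   malloc.h (not reproduced here), inferred from how they are used below.
   Field names match the code; the authoritative declarations live in
   malloc.h.

   struct arena_header {
           size_t type;            // ARENA_TYPE_FREE/USED/HEAD/DEAD
           size_t size;            // total block size, header included
           struct free_arena_header *next, *prev;  // all-blocks list
   };

   struct free_arena_header {
           struct arena_header a;
           struct free_arena_header *next_free, *prev_free;  // free list
   };

   Every block carries an arena_header; only free blocks (and the head
   node) also use the two free-list links. */
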
/* Both the arena list and the free-memory list are doubly linked
   lists with a head node.  This is the head node.  Note that the
   arena list is sorted in order of address. */
static struct free_arena_header __malloc_head = {
        {
                ARENA_TYPE_HEAD,
                0,
                &__malloc_head,
                &__malloc_head,
        },
        &__malloc_head,
        &__malloc_head
};
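
/* The initializer above sets a.type = ARENA_TYPE_HEAD and a.size = 0,
   and points all four links back at the head node itself, so both
   circular lists start out empty. */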

static inline void mark_block_dead(struct free_arena_header *ah)
{
#ifdef DEBUG_MALLOC
        ah->a.type = ARENA_TYPE_DEAD;
#endif
}

static inline void remove_from_main_chain(struct free_arena_header *ah)
{
        struct free_arena_header *ap, *an;

        mark_block_dead(ah);

        ap = ah->a.prev;
        an = ah->a.next;
        ap->a.next = an;
        an->a.prev = ap;
}

static inline void remove_from_free_chain(struct free_arena_header *ah)
{
        struct free_arena_header *ap, *an;

        ap = ah->prev_free;
        an = ah->next_free;
        ap->next_free = an;
        an->prev_free = ap;
}

static inline void remove_from_chains(struct free_arena_header *ah)
{
        remove_from_free_chain(ah);
        remove_from_main_chain(ah);
}

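/* Carve an allocation of 'size' bytes (already rounded up, header space
   included) out of the free block 'fp'.  If the block is big enough to
   leave a usable remainder -- at least 2 * sizeof(struct arena_header),
   so the tail can still hold a free_arena_header -- it is split and the
   tail replaces 'fp' on the free list; otherwise the whole block is
   handed out.  Illustrative example, assuming 16-byte arena headers: a
   512-byte free block serving size == 128 becomes a 128-byte used block
   plus a 384-byte free block at fp + 128. */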
static void *__malloc_from_block(struct free_arena_header *fp, size_t size)
{
        size_t fsize;
        struct free_arena_header *nfp, *na, *fpn, *fpp;

        fsize = fp->a.size;

        /* The 2* accounts for the larger header a free block needs:
           the free-list links on top of the plain arena header */
        if (fsize >= size + 2 * sizeof(struct arena_header)) {
                /* Bigger block than required -- split block */
                nfp = (struct free_arena_header *)((char *)fp + size);
                na = fp->a.next;

                nfp->a.type = ARENA_TYPE_FREE;
                nfp->a.size = fsize - size;
                fp->a.type = ARENA_TYPE_USED;
                fp->a.size = size;

                /* Insert into all-block chain */
                nfp->a.prev = fp;
                nfp->a.next = na;
                na->a.prev = nfp;
                fp->a.next = nfp;

                /* Replace current block on free chain */
                nfp->next_free = fpn = fp->next_free;
                nfp->prev_free = fpp = fp->prev_free;
                fpn->prev_free = nfp;
                fpp->next_free = nfp;
        } else {
                fp->a.type = ARENA_TYPE_USED; /* Allocate the whole block */
                remove_from_free_chain(fp);
        }

        return (void *)(&fp->a + 1);
}

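/* Put a no-longer-used (or freshly mapped) block back on the free list,
   coalescing with the physically preceding and/or following block when
   either is free and adjacent.  Returns the block that now contains the
   freed memory, which may be the predecessor it was merged into. */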
static struct free_arena_header *__free_block(struct free_arena_header *ah)
{
        struct free_arena_header *pah, *nah;

        pah = ah->a.prev;
        nah = ah->a.next;
        if (pah->a.type == ARENA_TYPE_FREE &&
            (char *)pah + pah->a.size == (char *)ah) {
                /* Coalesce into the previous block */
                pah->a.size += ah->a.size;
                pah->a.next = nah;
                nah->a.prev = pah;
                mark_block_dead(ah);

                ah = pah;
                pah = ah->a.prev;
        } else {
                /* Need to add this block to the free chain */
                ah->a.type = ARENA_TYPE_FREE;

                ah->next_free = __malloc_head.next_free;
                ah->prev_free = &__malloc_head;
                __malloc_head.next_free = ah;
                ah->next_free->prev_free = ah;
        }

        /* In either of the previous cases, we might be able to merge
           with the subsequent block... */
        if (nah->a.type == ARENA_TYPE_FREE &&
            (char *)ah + ah->a.size == (char *)nah) {
                ah->a.size += nah->a.size;

                /* Remove the old block from the chains */
                remove_from_chains(nah);
        }

        /* Return the (possibly merged) block that contains the block
           we were called with */
        return ah;
}

void *malloc(size_t size)
{
        struct free_arena_header *fp;
        struct free_arena_header *pah;
        size_t fsize;

        if (size == 0)
                return NULL;

        /* Add the obligatory arena header, and round up */
        size = (size + 2 * sizeof(struct arena_header) - 1) & ARENA_SIZE_MASK;
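        /* Worked example, assuming ARENA_SIZE_MASK is
           ~(sizeof(struct arena_header) - 1) and 16-byte headers: a
           100-byte request becomes (100 + 32 - 1) & ~15 == 128, i.e. a
           16-byte header plus 112 bytes of payload, with enough slack
           for the block to hold a free_arena_header once freed. */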

        for (fp = __malloc_head.next_free; fp->a.type != ARENA_TYPE_HEAD;
             fp = fp->next_free) {
                if (fp->a.size >= size) {
                        /* Found fit -- allocate out of this block */
                        return __malloc_from_block(fp, size);
                }
        }

        /* Nothing found... need to request a block from the kernel */
        fsize = (size + MALLOC_CHUNK_MASK) & ~MALLOC_CHUNK_MASK;

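        /* Note: with MAP_ANONYMOUS the file descriptor argument is
           ignored on Linux; portable code conventionally passes -1
           rather than 0. */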
#if _KLIBC_MALLOC_USES_SBRK
        fp = (struct free_arena_header *)sbrk(fsize);
#else
        fp = (struct free_arena_header *)
            mmap(NULL, fsize, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
#endif

        if (fp == (struct free_arena_header *)MAP_FAILED) {
                return NULL;    /* Failed to get a block */
        }

        /* Insert the block into the management chains.  We need to set
           up the size and the main block list pointer, the rest of
           the work is logically identical to free(). */
        fp->a.type = ARENA_TYPE_FREE;
        fp->a.size = fsize;

        /* We need to insert this into the main block list in the proper
           place -- this list is required to be sorted.  Since we most
           likely get memory assignments in ascending order, search
           backwards for the proper place. */
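        /* The 'pah < fp' comparison below orders pointers into different
           mappings, which is not strictly portable ISO C but is well
           defined on the flat address spaces klibc targets. */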
        for (pah = __malloc_head.a.prev; pah->a.type != ARENA_TYPE_HEAD;
             pah = pah->a.prev) {
                if (pah < fp)
                        break;
        }

        /* Now pah points to the node that should be the predecessor of
           the new node */
        fp->a.next = pah->a.next;
        fp->a.prev = pah;
        pah->a.next = fp;
        fp->a.next->a.prev = fp;

        /* Insert into the free chain and coalesce with adjacent blocks */
        fp = __free_block(fp);

        /* Now we can allocate from this block */
        return __malloc_from_block(fp, size);
}

void free(void *ptr)
{
        struct free_arena_header *ah;

        if (!ptr)
                return;

        ah = (struct free_arena_header *)
            ((struct arena_header *)ptr - 1);

#ifdef DEBUG_MALLOC
        assert(ah->a.type == ARENA_TYPE_USED);
#endif

        /* Merge into adjacent free blocks */
        ah = __free_block(ah);

        /* See if it makes sense to return memory to the system */
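        /* In the sbrk() case the break can only be moved down, so memory
           goes back to the kernel only when the freed block ends exactly
           at the current break and is at least one chunk in size. */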
#if _KLIBC_MALLOC_USES_SBRK
        if (ah->a.size >= _KLIBC_MALLOC_CHUNK_SIZE &&
            (char *)ah + ah->a.size == __current_brk) {
                remove_from_chains(ah);
                brk(ah);
        }
#else
        {
                size_t page_size = getpagesize();
                size_t page_mask = page_size - 1;
                size_t head_portion = -(size_t)ah & page_mask;
                size_t tail_portion = ((size_t)ah + ah->a.size) & page_mask;
                size_t adj_size;

                /* Careful here... an individual chunk of memory must have
                   a minimum size if it exists at all, so if either the
                   head or the tail is below the minimum, then extend
                   that chunk by a page. */
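
                /* Illustrative example with hypothetical numbers: given
                   4 KiB pages and a 64 KiB _KLIBC_MALLOC_CHUNK_SIZE, a
                   free block at 0x401010 of size 0x20ff0 gives
                   head_portion == 0xff0, tail_portion == 0 and
                   adj_size == 0x20000, so munmap(0x402000, 0x20000)
                   releases 32 whole pages while the leading 0xff0 bytes
                   stay behind as a smaller free block. */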

                if (head_portion &&
                    head_portion < 2 * sizeof(struct arena_header))
                        head_portion += page_size;

                if (tail_portion &&
                    tail_portion < 2 * sizeof(struct arena_header))
                        tail_portion += page_size;

                adj_size = ah->a.size - head_portion - tail_portion;

                /* Worth it?  This is written the way it is to guard
                   against overflows... */
                if (ah->a.size >= head_portion + tail_portion +
                    _KLIBC_MALLOC_CHUNK_SIZE) {
                        struct free_arena_header *tah, *tan, *tap;

                        if (tail_portion) {
                                /* Make a new header, and insert into chains
                                   immediately after the current block */
                                tah = (struct free_arena_header *)
                                    ((char *)ah + head_portion + adj_size);
                                tah->a.type = ARENA_TYPE_FREE;
                                tah->a.size = tail_portion;
                                tah->a.next = tan = ah->a.next;
                                tan->a.prev = tah;
                                tah->a.prev = ah;
                                ah->a.next = tah;
                                tah->prev_free = tap = ah->prev_free;
                                tap->next_free = tah;
                                tah->next_free = ah;
                                ah->prev_free = tah;
                        }

                        if (head_portion)
                                ah->a.size = head_portion;
                        else
                                remove_from_chains(ah);

                        munmap((char *)ah + head_portion, adj_size);
                }
        }
#endif
}