Diffstat (limited to 'threads/malloc.c')
 -rw-r--r--   threads/malloc.c | 294
 1 file changed, 294 insertions, 0 deletions
diff --git a/threads/malloc.c b/threads/malloc.c
new file mode 100644
index 0000000..f6f803b
--- /dev/null
+++ b/threads/malloc.c
@@ -0,0 +1,294 @@
#include "threads/malloc.h"
#include <debug.h>
#include <list.h>
#include <round.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include "threads/palloc.h"
#include "threads/synch.h"
#include "threads/vaddr.h"

/* A simple implementation of malloc().

   The size of each request, in bytes, is rounded up to a power
   of 2 and assigned to the "descriptor" that manages blocks of
   that size. The descriptor keeps a list of free blocks. If
   the free list is nonempty, one of its blocks is used to
   satisfy the request.

   Otherwise, a new page of memory, called an "arena", is
   obtained from the page allocator (if none is available,
   malloc() returns a null pointer). The new arena is divided
   into blocks, all of which are added to the descriptor's free
   list. Then we return one of the new blocks.

   When we free a block, we add it to its descriptor's free list.
   But if the arena that the block was in now has no in-use
   blocks, we remove all of the arena's blocks from the free list
   and give the arena back to the page allocator.

   We can't handle blocks bigger than 2 kB using this scheme,
   because they're too big to fit in a single page with a
   descriptor. We handle those by allocating contiguous pages
   with the page allocator and sticking the allocation size at
   the beginning of the allocated block's arena header. */
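/* Illustrative example, assuming PGSIZE == 4096: a malloc (100)
   request is rounded up to the 128-byte descriptor and served from
   its free list, while a request of several kilobytes is too big for
   any descriptor and falls through to the big-block path. */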

/* Descriptor. */
struct desc
  {
    size_t block_size;          /* Size of each element in bytes. */
    size_t blocks_per_arena;    /* Number of blocks in an arena. */
    struct list free_list;      /* List of free blocks. */
    struct lock lock;           /* Lock. */
  };

/* Magic number for detecting arena corruption. */
#define ARENA_MAGIC 0x9a548eed

/* Arena. */
struct arena
  {
    unsigned magic;             /* Always set to ARENA_MAGIC. */
    struct desc *desc;          /* Owning descriptor, null for big block. */
    size_t free_cnt;            /* Free blocks; pages in big block. */
  };

/* Free block. */
struct block
  {
    struct list_elem free_elem; /* Free list element. */
  };
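/* Illustrative arena layout (assuming PGSIZE == 4096 and a 12-byte
   struct arena) for the 1024-byte descriptor:

      offset    0: struct arena  (magic, desc, free_cnt)
      offset   12: block 0       (1024 bytes)
      offset 1036: block 1       (1024 bytes)
      offset 2060: block 2       (1024 bytes)
      offset 3084: unused slack to the end of the page

   A block on the free list stores its list_elem in its own first
   bytes; once allocated, all of its bytes belong to the caller. */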

/* Our set of descriptors. */
static struct desc descs[10];   /* Descriptors. */
static size_t desc_cnt;         /* Number of descriptors. */

static struct arena *block_to_arena (struct block *);
static struct block *arena_to_block (struct arena *, size_t idx);

/* Initializes the malloc() descriptors. */
void
malloc_init (void)
{
  size_t block_size;

  for (block_size = 16; block_size < PGSIZE / 2; block_size *= 2)
    {
      struct desc *d = &descs[desc_cnt++];
      ASSERT (desc_cnt <= sizeof descs / sizeof *descs);
      d->block_size = block_size;
      d->blocks_per_arena = (PGSIZE - sizeof (struct arena)) / block_size;
      list_init (&d->free_list);
      lock_init (&d->lock);
    }
}
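/* With PGSIZE == 4096 the loop above creates seven descriptors, for
   block sizes 16, 32, 64, 128, 256, 512, and 1024 bytes; assuming a
   12-byte struct arena, their arenas hold 255, 127, 63, 31, 15, 7,
   and 3 blocks respectively. */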

/* Obtains and returns a new block of at least SIZE bytes.
   Returns a null pointer if memory is not available. */
void *
malloc (size_t size)
{
  struct desc *d;
  struct block *b;
  struct arena *a;

  /* A null pointer satisfies a request for 0 bytes. */
  if (size == 0)
    return NULL;

  /* Find the smallest descriptor that satisfies a SIZE-byte
     request. */
  for (d = descs; d < descs + desc_cnt; d++)
    if (d->block_size >= size)
      break;
  if (d == descs + desc_cnt)
    {
      /* SIZE is too big for any descriptor.
         Allocate enough pages to hold SIZE plus an arena. */
      size_t page_cnt = DIV_ROUND_UP (size + sizeof *a, PGSIZE);
      a = palloc_get_multiple (0, page_cnt);
      if (a == NULL)
        return NULL;

      /* Initialize the arena to indicate a big block of PAGE_CNT
         pages, and return it. */
      a->magic = ARENA_MAGIC;
      a->desc = NULL;
      a->free_cnt = page_cnt;
      return a + 1;
    }

  lock_acquire (&d->lock);

  /* If the free list is empty, create a new arena. */
  if (list_empty (&d->free_list))
    {
      size_t i;

      /* Allocate a page. */
      a = palloc_get_page (0);
      if (a == NULL)
        {
          lock_release (&d->lock);
          return NULL;
        }

      /* Initialize arena and add its blocks to the free list. */
      a->magic = ARENA_MAGIC;
      a->desc = d;
      a->free_cnt = d->blocks_per_arena;
      for (i = 0; i < d->blocks_per_arena; i++)
        {
          struct block *b = arena_to_block (a, i);
          list_push_back (&d->free_list, &b->free_elem);
        }
    }

  /* Get a block from free list and return it. */
  b = list_entry (list_pop_front (&d->free_list), struct block, free_elem);
  a = block_to_arena (b);
  a->free_cnt--;
  lock_release (&d->lock);
  return b;
}
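/* Illustrative trace of the big-block path above (assuming
   PGSIZE == 4096 and a 12-byte struct arena): malloc (3000) exceeds
   the largest descriptor, so DIV_ROUND_UP (3000 + 12, 4096) = 1 page
   is requested from palloc, its arena header records desc == NULL and
   free_cnt == 1, and the caller receives a pointer 12 bytes past the
   start of the page. */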

/* Allocates and returns A times B bytes initialized to zeroes.
   Returns a null pointer if memory is not available. */
void *
calloc (size_t a, size_t b)
{
  void *p;
  size_t size;

  /* Calculate block size and make sure it fits in size_t. */
  size = a * b;
  if (size < a || size < b)
    return NULL;

  /* Allocate and zero memory. */
  p = malloc (size);
  if (p != NULL)
    memset (p, 0, size);

  return p;
}
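/* Worked example of the overflow check above (hypothetical values,
   32-bit size_t): calloc (0x10000, 0x10001) wraps around to
   size == 0x10000, which is less than B, so a null pointer is
   returned instead of a block smaller than the caller asked for. */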

/* Returns the number of bytes allocated for BLOCK. */
static size_t
block_size (void *block)
{
  struct block *b = block;
  struct arena *a = block_to_arena (b);
  struct desc *d = a->desc;

  return d != NULL ? d->block_size : PGSIZE * a->free_cnt - pg_ofs (block);
}

/* Attempts to resize OLD_BLOCK to NEW_SIZE bytes, possibly
   moving it in the process.
   If successful, returns the new block; on failure, returns a
   null pointer.
   A call with null OLD_BLOCK is equivalent to malloc(NEW_SIZE).
   A call with zero NEW_SIZE is equivalent to free(OLD_BLOCK). */
void *
realloc (void *old_block, size_t new_size)
{
  if (new_size == 0)
    {
      free (old_block);
      return NULL;
    }
  else
    {
      void *new_block = malloc (new_size);
      if (old_block != NULL && new_block != NULL)
        {
          size_t old_size = block_size (old_block);
          size_t min_size = new_size < old_size ? new_size : old_size;
          memcpy (new_block, old_block, min_size);
          free (old_block);
        }
      return new_block;
    }
}
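/* Illustrative trace of realloc() above: growing a block of the
   128-byte class to 200 bytes obtains a fresh block from the 256-byte
   class, copies min (200, 128) = 128 bytes, and frees the old block.
   If the new allocation fails, the old block is left untouched and a
   null pointer is returned. */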

/* Frees block P, which must have been previously allocated with
   malloc(), calloc(), or realloc(). */
void
free (void *p)
{
  if (p != NULL)
    {
      struct block *b = p;
      struct arena *a = block_to_arena (b);
      struct desc *d = a->desc;

      if (d != NULL)
        {
          /* It's a normal block. We handle it here. */

#ifndef NDEBUG
          /* Clear the block to help detect use-after-free bugs. */
          memset (b, 0xcc, d->block_size);
#endif

          lock_acquire (&d->lock);

          /* Add block to free list. */
          list_push_front (&d->free_list, &b->free_elem);

          /* If the arena is now entirely unused, free it. */
          if (++a->free_cnt >= d->blocks_per_arena)
            {
              size_t i;

              ASSERT (a->free_cnt == d->blocks_per_arena);
              for (i = 0; i < d->blocks_per_arena; i++)
                {
                  struct block *b = arena_to_block (a, i);
                  list_remove (&b->free_elem);
                }
              palloc_free_page (a);
            }

          lock_release (&d->lock);
        }
      else
        {
          /* It's a big block. Free its pages. */
          palloc_free_multiple (a, a->free_cnt);
          return;
        }
    }
}
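/* For instance, once every block of a 128-byte arena (31 blocks,
   assuming PGSIZE == 4096 and a 12-byte struct arena) has been freed,
   the loop above unlinks all of them from the descriptor's free list
   and the whole page is handed back to the page allocator. */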

/* Returns the arena that block B is inside. */
static struct arena *
block_to_arena (struct block *b)
{
  struct arena *a = pg_round_down (b);

  /* Check that the arena is valid. */
  ASSERT (a != NULL);
  ASSERT (a->magic == ARENA_MAGIC);

  /* Check that the block is properly aligned for the arena. */
  ASSERT (a->desc == NULL
          || (pg_ofs (b) - sizeof *a) % a->desc->block_size == 0);
  ASSERT (a->desc != NULL || pg_ofs (b) == sizeof *a);

  return a;
}
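/* This works because every arena starts at a page boundary, so
   rounding any block address down to PGSIZE lands on the arena header
   that owns it: e.g., a hypothetical 128-byte-class block at
   0xc0103e0c rounds down to its arena at 0xc0103000 (assuming
   PGSIZE == 4096). */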

/* Returns the IDX'th block (counting from 0) within arena A. */
static struct block *
arena_to_block (struct arena *a, size_t idx)
{
  ASSERT (a != NULL);
  ASSERT (a->magic == ARENA_MAGIC);
  ASSERT (idx < a->desc->blocks_per_arena);
  return (struct block *) ((uint8_t *) a
                           + sizeof *a
                           + idx * a->desc->block_size);
}
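/* The arithmetic above mirrors the arena layout sketched earlier:
   block IDX starts at A + sizeof (struct arena) + IDX * block_size.
   For a hypothetical 64-byte-descriptor arena at 0xc0104000 (12-byte
   struct arena), block 2 starts at 0xc0104000 + 12 + 128 =
   0xc010408c. */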
