summaryrefslogtreecommitdiffstats
path: root/threads/palloc.c
diff options
context:
space:
mode:
Diffstat (limited to 'threads/palloc.c')
-rw-r--r--threads/palloc.c199
1 files changed, 199 insertions, 0 deletions
diff --git a/threads/palloc.c b/threads/palloc.c
new file mode 100644
index 0000000..5c7ef2f
--- /dev/null
+++ b/threads/palloc.c
@@ -0,0 +1,199 @@
1#include "threads/palloc.h"
2#include <bitmap.h>
3#include <debug.h>
4#include <inttypes.h>
5#include <round.h>
6#include <stddef.h>
7#include <stdint.h>
8#include <stdio.h>
9#include <string.h>
10#include "threads/loader.h"
11#include "threads/synch.h"
12#include "threads/vaddr.h"
13
14/* Page allocator. Hands out memory in page-size (or
15 page-multiple) chunks. See malloc.h for an allocator that
16 hands out smaller chunks.
17
18 System memory is divided into two "pools" called the kernel
19 and user pools. The user pool is for user (virtual) memory
20 pages, the kernel pool for everything else. The idea here is
21 that the kernel needs to have memory for its own operations
22 even if user processes are swapping like mad.
23
24 By default, half of system RAM is given to the kernel pool and
25 half to the user pool. That should be huge overkill for the
26 kernel pool, but that's just fine for demonstration purposes. */
27
28/* A memory pool. */
struct pool
  {
    struct lock lock;                   /* Mutual exclusion. */
    struct bitmap *used_map;            /* One bit per page; true = allocated. */
    uint8_t *base;                      /* Base of pool. */
  };
35
36/* Two pools: one for kernel data, one for user pages. */
37static struct pool kernel_pool, user_pool;
38
39static void init_pool (struct pool *, void *base, size_t page_cnt,
40 const char *name);
41static bool page_from_pool (const struct pool *, void *page);
42
43/* Initializes the page allocator. At most USER_PAGE_LIMIT
44 pages are put into the user pool. */
45void
46palloc_init (size_t user_page_limit)
47{
48 /* Free memory starts at 1 MB and runs to the end of RAM. */
49 uint8_t *free_start = ptov (1024 * 1024);
50 uint8_t *free_end = ptov (init_ram_pages * PGSIZE);
51 size_t free_pages = (free_end - free_start) / PGSIZE;
52 size_t user_pages = free_pages / 2;
53 size_t kernel_pages;
54 if (user_pages > user_page_limit)
55 user_pages = user_page_limit;
56 kernel_pages = free_pages - user_pages;
57
58 /* Give half of memory to kernel, half to user. */
59 init_pool (&kernel_pool, free_start, kernel_pages, "kernel pool");
60 init_pool (&user_pool, free_start + kernel_pages * PGSIZE,
61 user_pages, "user pool");
62}
63
64/* Obtains and returns a group of PAGE_CNT contiguous free pages.
65 If PAL_USER is set, the pages are obtained from the user pool,
66 otherwise from the kernel pool. If PAL_ZERO is set in FLAGS,
67 then the pages are filled with zeros. If too few pages are
68 available, returns a null pointer, unless PAL_ASSERT is set in
69 FLAGS, in which case the kernel panics. */
70void *
71palloc_get_multiple (enum palloc_flags flags, size_t page_cnt)
72{
73 struct pool *pool = flags & PAL_USER ? &user_pool : &kernel_pool;
74 void *pages;
75 size_t page_idx;
76
77 if (page_cnt == 0)
78 return NULL;
79
80 lock_acquire (&pool->lock);
81 page_idx = bitmap_scan_and_flip (pool->used_map, 0, page_cnt, false);
82 lock_release (&pool->lock);
83
84 if (page_idx != BITMAP_ERROR)
85 pages = pool->base + PGSIZE * page_idx;
86 else
87 pages = NULL;
88
89 if (pages != NULL)
90 {
91 if (flags & PAL_ZERO)
92 memset (pages, 0, PGSIZE * page_cnt);
93 }
94 else
95 {
96 if (flags & PAL_ASSERT)
97 PANIC ("palloc_get: out of pages");
98 }
99
100 return pages;
101}
102
103/* Obtains a single free page and returns its kernel virtual
104 address.
105 If PAL_USER is set, the page is obtained from the user pool,
106 otherwise from the kernel pool. If PAL_ZERO is set in FLAGS,
107 then the page is filled with zeros. If no pages are
108 available, returns a null pointer, unless PAL_ASSERT is set in
109 FLAGS, in which case the kernel panics. */
110void *
111palloc_get_page (enum palloc_flags flags)
112{
113 return palloc_get_multiple (flags, 1);
114}
115
116/* Frees the PAGE_CNT pages starting at PAGES. */
117void
118palloc_free_multiple (void *pages, size_t page_cnt)
119{
120 struct pool *pool;
121 size_t page_idx;
122
123 ASSERT (pg_ofs (pages) == 0);
124 if (pages == NULL || page_cnt == 0)
125 return;
126
127 if (page_from_pool (&kernel_pool, pages))
128 pool = &kernel_pool;
129 else if (page_from_pool (&user_pool, pages))
130 pool = &user_pool;
131 else
132 NOT_REACHED ();
133
134 page_idx = pg_no (pages) - pg_no (pool->base);
135
136#ifndef NDEBUG
137 memset (pages, 0xcc, PGSIZE * page_cnt);
138#endif
139
140 ASSERT (bitmap_all (pool->used_map, page_idx, page_cnt));
141 bitmap_set_multiple (pool->used_map, page_idx, page_cnt, false);
142}
143
144/* Frees the page at PAGE. */
void
palloc_free_page (void *page)
{
  /* Freeing one page is the one-page case of the multi-page
     release. */
  palloc_free_multiple (page, 1);
}
150
151/* List all used pages */
152void
153palloc_dump_used_pages ()
154{
155 struct pool *pool;
156 int pool_index;
157 for (pool_index = 0; pool_index < 2; pool_index++) {
158 pool = (pool_index == 0) ? &kernel_pool : &user_pool;
159 printf("%s Pool at %p\n", (pool_index == 0) ? "kernel" : "user", pool->base);
160 size_t start = 0, index;
161 while ((index = bitmap_scan(pool->used_map,start,1,true)) != BITMAP_ERROR) {
162 printf(" - %p\n",pool->base + (PGSIZE * index));
163 start = index + 1;
164 }
165 }
166}
167
/* Initializes pool P to manage PAGE_CNT pages starting at BASE,
   naming it NAME for debugging purposes. */
170static void
171init_pool (struct pool *p, void *base, size_t page_cnt, const char *name)
172{
173 /* We'll put the pool's used_map at its base.
174 Calculate the space needed for the bitmap
175 and subtract it from the pool's size. */
176 size_t bm_pages = DIV_ROUND_UP (bitmap_buf_size (page_cnt), PGSIZE);
177 if (bm_pages > page_cnt)
178 PANIC ("Not enough memory in %s for bitmap.", name);
179 page_cnt -= bm_pages;
180
181 printf ("%zu pages available in %s.\n", page_cnt, name);
182
183 /* Initialize the pool. */
184 lock_init (&p->lock);
185 p->used_map = bitmap_create_in_buf (page_cnt, base, bm_pages * PGSIZE);
186 p->base = base + bm_pages * PGSIZE;
187}
188
189/* Returns true if PAGE was allocated from POOL,
190 false otherwise. */
191static bool
192page_from_pool (const struct pool *pool, void *page)
193{
194 size_t page_no = pg_no (page);
195 size_t start_page = pg_no (pool->base);
196 size_t end_page = start_page + bitmap_size (pool->used_map);
197
198 return page_no >= start_page && page_no < end_page;
199}