Diffstat (limited to 'threads/thread.c')
-rw-r--r--   threads/thread.c   594
1 file changed, 594 insertions, 0 deletions
diff --git a/threads/thread.c b/threads/thread.c
new file mode 100644
index 0000000..845f059
--- /dev/null
+++ b/threads/thread.c
@@ -0,0 +1,594 @@
#include "threads/thread.h"
#include <debug.h>
#include <stddef.h>
#include <random.h>
#include <stdio.h>
#include <string.h>
#include "threads/flags.h"
#include "threads/interrupt.h"
#include "threads/intr-stubs.h"
#include "threads/palloc.h"
#include "threads/switch.h"
#include "threads/synch.h"
#include "threads/vaddr.h"
#ifdef USERPROG
#include "userprog/process.h"
#endif

/* Random value for struct thread's `magic' member.
   Used to detect stack overflow.  See the big comment at the top
   of thread.h for details. */
#define THREAD_MAGIC 0xcd6abf4b

/* List of processes in THREAD_READY state, that is, processes
   that are ready to run but not actually running. */
static struct list ready_list;

/* List of all processes.  Processes are added to this list
   when they are first scheduled and removed when they exit. */
static struct list all_list;

/* Idle thread. */
static struct thread *idle_thread;

/* Initial thread, the thread running init.c:main(). */
static struct thread *initial_thread;

/* Lock used by allocate_tid(). */
static struct lock tid_lock;

/* Stack frame for kernel_thread(). */
struct kernel_thread_frame 
  {
    void *eip;                  /* Return address. */
    thread_func *function;      /* Function to call. */
    void *aux;                  /* Auxiliary data for function. */
  };

/* Statistics. */
static long long idle_ticks;    /* # of timer ticks spent idle. */
static long long kernel_ticks;  /* # of timer ticks in kernel threads. */
static long long user_ticks;    /* # of timer ticks in user programs. */

/* Scheduling. */
#define TIME_SLICE 4            /* # of timer ticks to give each thread. */
static unsigned thread_ticks;   /* # of timer ticks since last yield. */

/* If false (default), use round-robin scheduler.
   If true, use multi-level feedback queue scheduler.
   Controlled by kernel command-line option "-o mlfqs". */
bool thread_mlfqs;

static void kernel_thread (thread_func *, void *aux);

static void idle (void *aux UNUSED);
static struct thread *running_thread (void);
static struct thread *next_thread_to_run (void);
static void init_thread (struct thread *, const char *name, int priority);
static bool is_thread (struct thread *) UNUSED;
static void *alloc_frame (struct thread *, size_t size);
static void schedule (void);
void thread_schedule_tail (struct thread *prev);
static tid_t allocate_tid (void);

/* Initializes the threading system by transforming the code
   that's currently running into a thread.  This can't work in
   general and it is possible in this case only because loader.S
   was careful to put the bottom of the stack at a page boundary.

   Also initializes the run queue and the tid lock.

   After calling this function, be sure to initialize the page
   allocator before trying to create any threads with
   thread_create().

   It is not safe to call thread_current() until this function
   finishes. */
void
thread_init (void) 
{
  ASSERT (intr_get_level () == INTR_OFF);

  lock_init (&tid_lock);
  list_init (&ready_list);
  list_init (&all_list);

#ifdef USERPROG
  process_init ();
#endif

  /* Set up a thread structure for the running thread. */
  initial_thread = running_thread ();
  init_thread (initial_thread, "main", PRI_DEFAULT);
  initial_thread->status = THREAD_RUNNING;
  initial_thread->tid = allocate_tid ();
}
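
/* Usage sketch, not part of this file: the boot code in
   init.c:main() is expected to call into this module in roughly
   the order below, honoring the constraint above that the page
   allocator comes up before any thread_create() call.  The exact
   surrounding calls vary by Pintos version.

     thread_init ();    // turn the running code into a thread
     ...
     palloc_init ();    // page allocator, needed by thread_create()
     ...
     thread_start ();   // create idle thread, enable interrupts
*/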

/* Starts preemptive thread scheduling by enabling interrupts.
   Also creates the idle thread. */
void
thread_start (void) 
{
  /* Create the idle thread. */
  struct semaphore idle_started;
  sema_init (&idle_started, 0);
  thread_create ("idle", PRI_MIN, idle, &idle_started);

  /* Start preemptive thread scheduling. */
  intr_enable ();

  /* Wait for the idle thread to initialize idle_thread. */
  sema_down (&idle_started);
}

/* Called by the timer interrupt handler at each timer tick.
   Thus, this function runs in an external interrupt context. */
void
thread_tick (void) 
{
  struct thread *t = thread_current ();

  /* Update statistics. */
  if (t == idle_thread)
    idle_ticks++;
#ifdef USERPROG
  else if (t->pagedir != NULL)
    user_ticks++;
#endif
  else
    kernel_ticks++;

  /* Enforce preemption. */
  if (++thread_ticks >= TIME_SLICE)
    intr_yield_on_return ();
}

/* Prints thread statistics. */
void
thread_print_stats (void) 
{
  printf ("Thread: %lld idle ticks, %lld kernel ticks, %lld user ticks\n",
          idle_ticks, kernel_ticks, user_ticks);
}

/* Creates a new kernel thread named NAME with the given initial
   PRIORITY, which executes FUNCTION passing AUX as the argument,
   and adds it to the ready queue.  Returns the thread identifier
   for the new thread, or TID_ERROR if creation fails.

   If thread_start() has been called, then the new thread may be
   scheduled before thread_create() returns.  It could even exit
   before thread_create() returns.  Contrariwise, the original
   thread may run for any amount of time before the new thread is
   scheduled.  Use a semaphore or some other form of
   synchronization if you need to ensure ordering.

   The code provided sets the new thread's `priority' member to
   PRIORITY, but no actual priority scheduling is implemented.
   Priority scheduling is the goal of Problem 1-3. */
tid_t
thread_create (const char *name, int priority,
               thread_func *function, void *aux) 
{
  struct thread *t;
  struct kernel_thread_frame *kf;
  struct switch_entry_frame *ef;
  struct switch_threads_frame *sf;
  tid_t tid;
  enum intr_level old_level;

  ASSERT (function != NULL);

  /* Allocate thread. */
  t = palloc_get_page (PAL_ZERO);
  if (t == NULL)
    return TID_ERROR;

  /* Initialize thread. */
  init_thread (t, name, priority);
  tid = t->tid = allocate_tid ();

  /* Prepare thread for first run by initializing its stack.
     Do this atomically so intermediate values for the 'stack'
     member cannot be observed. */
  old_level = intr_disable ();

  /* Stack frame for kernel_thread(). */
  kf = alloc_frame (t, sizeof *kf);
  kf->eip = NULL;
  kf->function = function;
  kf->aux = aux;

  /* Stack frame for switch_entry(). */
  ef = alloc_frame (t, sizeof *ef);
  ef->eip = (void (*) (void)) kernel_thread;

  /* Stack frame for switch_threads(). */
  sf = alloc_frame (t, sizeof *sf);
  sf->eip = switch_entry;
  sf->ebp = 0;

  intr_set_level (old_level);

  /* Add to run queue. */
  thread_unblock (t);

  return tid;
}
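
/* Usage sketch, not part of the original file: the semaphore
   handshake that the comment above suggests for ordering.  The
   names here (worker, wait_for_worker, started) are hypothetical.

     static void
     worker (void *started_) 
     {
       struct semaphore *started = started_;
       sema_up (started);        // tell the creator we are running
       ...                       // rest of the worker's job
     }

     static void
     wait_for_worker (void) 
     {
       struct semaphore started;
       sema_init (&started, 0);
       if (thread_create ("worker", PRI_DEFAULT, worker, &started)
           != TID_ERROR)
         sema_down (&started);   // block until worker has started
     }

   thread_start() above uses exactly this pattern for the idle
   thread. */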

/* Puts the current thread to sleep.  It will not be scheduled
   again until awoken by thread_unblock().

   This function must be called with interrupts turned off.  It
   is usually a better idea to use one of the synchronization
   primitives in synch.h. */
void
thread_block (void) 
{
  ASSERT (!intr_context ());
  ASSERT (intr_get_level () == INTR_OFF);

  thread_current ()->status = THREAD_BLOCKED;
  schedule ();
}
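
/* Sketch of the intended usage, not part of this file: the
   synch.h primitives pair thread_block() with thread_unblock()
   under disabled interrupts, roughly like this simplified,
   assumed body of sema_down() (see synch.c for the real one):

     enum intr_level old_level = intr_disable ();
     while (sema->value == 0) 
       {
         list_push_back (&sema->waiters, &thread_current ()->elem);
         thread_block ();        // sema_up() will unblock us
       }
     sema->value--;
     intr_set_level (old_level);
*/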

/* Transitions a blocked thread T to the ready-to-run state.
   This is an error if T is not blocked.  (Use thread_yield() to
   make the running thread ready.)

   This function does not preempt the running thread.  This can
   be important: if the caller had disabled interrupts itself,
   it may expect that it can atomically unblock a thread and
   update other data. */
void
thread_unblock (struct thread *t) 
{
  enum intr_level old_level;

  ASSERT (is_thread (t));

  old_level = intr_disable ();
  ASSERT (t->status == THREAD_BLOCKED);
  list_push_back (&ready_list, &t->elem);
  t->status = THREAD_READY;
  intr_set_level (old_level);
}

/* Returns the name of the running thread. */
const char *
thread_name (void) 
{
  return thread_current ()->name;
}

/* Returns the running thread.
   This is running_thread() plus a couple of sanity checks.
   See the big comment at the top of thread.h for details. */
struct thread *
thread_current (void) 
{
  struct thread *t = running_thread ();

  /* Make sure T is really a thread.
     If either of these assertions fire, then your thread may
     have overflowed its stack.  Each thread has less than 4 kB
     of stack, so a few big automatic arrays or moderate
     recursion can cause stack overflow. */
  ASSERT (is_thread (t));
  ASSERT (t->status == THREAD_RUNNING);

  return t;
}

/* Returns the running thread's tid. */
tid_t
thread_tid (void) 
{
  return thread_current ()->tid;
}

/* Deschedules the current thread and destroys it.  Never
   returns to the caller. */
void
thread_exit (void) 
{
  ASSERT (!intr_context ());

#ifdef USERPROG
  process_exit ();
#endif

  /* Remove thread from all threads list, set our status to dying,
     and schedule another process.  That process will destroy us
     when it calls thread_schedule_tail(). */
  intr_disable ();
  list_remove (&thread_current ()->allelem);
  thread_current ()->status = THREAD_DYING;
  schedule ();
  NOT_REACHED ();
}

/* Yields the CPU.  The current thread is not put to sleep and
   may be scheduled again immediately at the scheduler's whim. */
void
thread_yield (void) 
{
  struct thread *cur = thread_current ();
  enum intr_level old_level;

  ASSERT (!intr_context ());

  old_level = intr_disable ();
  if (cur != idle_thread) 
    list_push_back (&ready_list, &cur->elem);
  cur->status = THREAD_READY;
  schedule ();
  intr_set_level (old_level);
}

/* Invoke function 'func' on all threads, passing along 'aux'.
   This function must be called with interrupts off. */
void
thread_foreach (thread_action_func *func, void *aux)
{
  struct list_elem *e;

  ASSERT (intr_get_level () == INTR_OFF);

  for (e = list_begin (&all_list); e != list_end (&all_list);
       e = list_next (e))
    {
      struct thread *t = list_entry (e, struct thread, allelem);
      func (t, aux);
    }
}
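
/* Usage sketch, not part of the original file: a hypothetical
   thread_action_func that counts threads, invoked with
   interrupts off as thread_foreach() requires.

     static void
     count_thread (struct thread *t UNUSED, void *aux) 
     {
       unsigned *n = aux;
       (*n)++;
     }

     ...
     unsigned n = 0;
     enum intr_level old_level = intr_disable ();
     thread_foreach (count_thread, &n);
     intr_set_level (old_level);
*/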

/* Sets the current thread's priority to NEW_PRIORITY. */
void
thread_set_priority (int new_priority) 
{
  thread_current ()->priority = new_priority;
}

/* Returns the current thread's priority. */
int
thread_get_priority (void) 
{
  return thread_current ()->priority;
}

/* Sets the current thread's nice value to NICE. */
void
thread_set_nice (int nice UNUSED) 
{
  /* Not yet implemented. */
}

/* Returns the current thread's nice value. */
int
thread_get_nice (void) 
{
  /* Not yet implemented. */
  return 0;
}

/* Returns 100 times the system load average. */
int
thread_get_load_avg (void) 
{
  /* Not yet implemented. */
  return 0;
}

/* Returns 100 times the current thread's recent_cpu value. */
int
thread_get_recent_cpu (void) 
{
  /* Not yet implemented. */
  return 0;
}

/* Idle thread.  Executes when no other thread is ready to run.

   The idle thread is initially put on the ready list by
   thread_start().  It will be scheduled once initially, at which
   point it initializes idle_thread, "up"s the semaphore passed
   to it to enable thread_start() to continue, and immediately
   blocks.  After that, the idle thread never appears in the
   ready list.  It is returned by next_thread_to_run() as a
   special case when the ready list is empty. */
static void
idle (void *idle_started_ UNUSED) 
{
  struct semaphore *idle_started = idle_started_;
  idle_thread = thread_current ();
  sema_up (idle_started);

  for (;;) 
    {
      /* Let someone else run. */
      intr_disable ();
      thread_block ();

      /* Re-enable interrupts and wait for the next one.

         The `sti' instruction leaves interrupts disabled until
         the completion of the next instruction, so these two
         instructions are executed atomically.  This atomicity is
         important; otherwise, an interrupt could be handled
         between re-enabling interrupts and waiting for the next
         one to occur, wasting as much as one clock tick worth of
         time.

         See [IA32-v2a] "HLT", [IA32-v2b] "STI", and [IA32-v3a]
         7.11.1 "HLT Instruction". */
      asm volatile ("sti; hlt" : : : "memory");
    }
}

/* Function used as the basis for a kernel thread. */
static void
kernel_thread (thread_func *function, void *aux) 
{
  ASSERT (function != NULL);

  intr_enable ();       /* The scheduler runs with interrupts off. */
  function (aux);       /* Execute the thread function. */
  thread_exit ();       /* If function() returns, kill the thread. */
}

/* Returns the running thread. */
struct thread *
running_thread (void) 
{
  uint32_t *esp;

  /* Copy the CPU's stack pointer into `esp', and then round that
     down to the start of a page.  Because `struct thread' is
     always at the beginning of a page and the stack pointer is
     somewhere in the middle, this locates the current thread. */
  asm ("mov %%esp, %0" : "=g" (esp));
  return pg_round_down (esp);
}
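
/* Worked example, not part of the original file: with the usual
   4 kB (0x1000-byte) pages, pg_round_down() simply clears the low
   12 bits of the address.  So if the stack pointer is, say,
   0xc0103f2c, the struct thread of the running thread lives at
   0xc0103000.  The concrete addresses here are made up for
   illustration. */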

/* Returns true if T appears to point to a valid thread. */
static bool
is_thread (struct thread *t)
{
  return t != NULL && t->magic == THREAD_MAGIC;
}

/* Does basic initialization of T as a blocked thread named
   NAME. */
static void
init_thread (struct thread *t, const char *name, int priority)
{
  ASSERT (t != NULL);
  ASSERT (PRI_MIN <= priority && priority <= PRI_MAX);
  ASSERT (name != NULL);

  memset (t, 0, sizeof *t);
  t->status = THREAD_BLOCKED;
  strlcpy (t->name, name, sizeof t->name);
  t->stack = (uint8_t *) t + PGSIZE;
  t->priority = priority;
  t->magic = THREAD_MAGIC;
  list_push_back (&all_list, &t->allelem);
#ifdef USERPROG
  list_init (&t->children);
#endif
}

/* Allocates a SIZE-byte frame at the top of thread T's stack and
   returns a pointer to the frame's base. */
static void *
alloc_frame (struct thread *t, size_t size) 
{
  /* Stack data is always allocated in word-size units. */
  ASSERT (is_thread (t));
  ASSERT (size % sizeof (uint32_t) == 0);

  t->stack -= size;
  return t->stack;
}
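
/* Illustration, not part of the original file: after the three
   alloc_frame() calls in thread_create(), a new thread's page
   looks roughly like this (the stack grows downward; exact sizes
   depend on the frame structs in switch.h):

     t + PGSIZE  +-----------------------------+
                 | struct kernel_thread_frame  |  eip = NULL, function, aux
                 +-----------------------------+
                 | struct switch_entry_frame   |  eip = kernel_thread
                 +-----------------------------+
                 | struct switch_threads_frame |  eip = switch_entry, ebp = 0
     t->stack -> +-----------------------------+
                 |             ...             |
     t           | struct thread               |
                 +-----------------------------+
*/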

/* Chooses and returns the next thread to be scheduled.  Should
   return a thread from the run queue, unless the run queue is
   empty.  (If the running thread can continue running, then it
   will be in the run queue.)  If the run queue is empty, return
   idle_thread. */
static struct thread *
next_thread_to_run (void) 
{
  if (list_empty (&ready_list))
    return idle_thread;
  else
    return list_entry (list_pop_front (&ready_list), struct thread, elem);
}

/* Completes a thread switch by activating the new thread's page
   tables, and, if the previous thread is dying, destroying it.

   At this function's invocation, we just switched from thread
   PREV, the new thread is already running, and interrupts are
   still disabled.  This function is normally invoked by
   thread_schedule() as its final action before returning, but
   the first time a thread is scheduled it is called by
   switch_entry() (see switch.S).

   It's not safe to call printf() until the thread switch is
   complete.  In practice that means that printf()s should be
   added at the end of the function.

   After this function and its caller have returned, the thread
   switch is complete. */
void
thread_schedule_tail (struct thread *prev)
{
  struct thread *cur = running_thread ();

  ASSERT (intr_get_level () == INTR_OFF);

  /* Mark us as running. */
  cur->status = THREAD_RUNNING;

  /* Start new time slice. */
  thread_ticks = 0;

#ifdef USERPROG
  /* Activate the new address space. */
  process_activate ();
#endif

  /* If the thread we switched from is dying, destroy its struct
     thread.  This must happen late so that thread_exit() doesn't
     pull the rug out from under itself.  (We don't free
     initial_thread because its memory was not obtained via
     palloc().) */
  if (prev != NULL && prev->status == THREAD_DYING && prev != initial_thread) 
    {
      ASSERT (prev != cur);
      palloc_free_page (prev);
    }
}

/* Schedules a new process.  At entry, interrupts must be off and
   the running process's state must have been changed from
   running to some other state.  This function finds another
   thread to run and switches to it.

   It's not safe to call printf() until thread_schedule_tail()
   has completed. */
static void
schedule (void) 
{
  struct thread *cur = running_thread ();
  struct thread *next = next_thread_to_run ();
  struct thread *prev = NULL;

  ASSERT (intr_get_level () == INTR_OFF);
  ASSERT (cur->status != THREAD_RUNNING);
  ASSERT (is_thread (next));

  if (cur != next)
    prev = switch_threads (cur, next);
  thread_schedule_tail (prev);
}

/* Returns a tid to use for a new thread. */
static tid_t
allocate_tid (void) 
{
  static tid_t next_tid = 1;
  tid_t tid;

  lock_acquire (&tid_lock);
  tid = next_tid++;
  lock_release (&tid_lock);

  return tid;
}

/* Offset of `stack' member within `struct thread'.
   Used by switch.S, which can't figure it out on its own. */
uint32_t thread_stack_ofs = offsetof (struct thread, stack);
