| author | manuel <manuel@mausz.at> | 2012-05-11 20:48:58 +0200 |
|---|---|---|
| committer | manuel <manuel@mausz.at> | 2012-05-11 20:48:58 +0200 |
| commit | 9b19a84ed640cb15e6ca87ddc025c90d78a5fbdc (patch) | |
| tree | 54345ae3ba366d1a4663e93b6b883e842f7a3918 /threads | |
| parent | 4571d64e74dadd1b2169e8cd276f8a298decd5f0 (diff) | |
* fix possible race in thread_set_priority
* fill in some parts of proj1.txt
* use thread_get_priority() whenever possible
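
The last bullet replaces direct reads of `thread_current ()->priority` with the public accessor. In stock Pintos that accessor is just a one-line wrapper (sketched below; if this tree later makes it donation-aware, the body will differ), which is exactly why funnelling every read through it helps: only one function has to change when the notion of "effective priority" does.

```c
/* Sketch of the stock Pintos accessor in threads/thread.c.
   A donation-aware build may return a donated priority instead. */
int
thread_get_priority (void)
{
  return thread_current ()->priority;
}
```
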
Diffstat (limited to 'threads')

| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | threads/synch.c | 16 |
| -rw-r--r-- | threads/thread.c | 21 |

2 files changed, 28 insertions, 9 deletions

diff --git a/threads/synch.c b/threads/synch.c
index 98905b1..a1b7549 100644
--- a/threads/synch.c
+++ b/threads/synch.c

@@ -128,7 +128,7 @@ sema_up (struct semaphore *sema)

| old line | old code | new line | new code |
|---|---|---|---|
| 128 | list_sort (&sema->waiters, &thread_priority_cmp_greater, NULL); | 128 | list_sort (&sema->waiters, &thread_priority_cmp_greater, NULL); |
| 129 | struct thread *t = list_entry (list_pop_front (&sema->waiters), struct thread, elem); | 129 | struct thread *t = list_entry (list_pop_front (&sema->waiters), struct thread, elem); |
| 130 | /* thread_unblock() doesn't yield so check if there's a need */ | 130 | /* thread_unblock() doesn't yield so check if there's a need */ |
| 131 | if (t->priority > thread_current ()->priority) | 131 | if (t->priority > thread_get_priority ()) |
| 132 | yield = true; | 132 | yield = true; |
| 133 | thread_unblock (t); | 133 | thread_unblock (t); |
| 134 | } | 134 | } |
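
The rest of `sema_up ()` is outside this hunk, so the following is only a sketch of the deferred-yield pattern the change fits into: the `yield` flag is computed while interrupts are off, and the actual `thread_yield ()` happens after the interrupt level is restored, guarded so it never runs inside an interrupt handler. The `intr_context ()` check and the exact placement of `sema->value++` are assumptions, not code from this commit.

```c
/* Sketch of the surrounding function in threads/synch.c (assumed shape). */
void
sema_up (struct semaphore *sema)
{
  enum intr_level old_level;
  bool yield = false;

  ASSERT (sema != NULL);

  old_level = intr_disable ();
  if (!list_empty (&sema->waiters))
    {
      /* Wake the highest-priority waiter (the hunk above). */
      list_sort (&sema->waiters, &thread_priority_cmp_greater, NULL);
      struct thread *t = list_entry (list_pop_front (&sema->waiters),
                                     struct thread, elem);
      /* thread_unblock() doesn't yield so check if there's a need */
      if (t->priority > thread_get_priority ())
        yield = true;
      thread_unblock (t);
    }
  sema->value++;
  intr_set_level (old_level);

  /* thread_yield() must not be called from an interrupt handler. */
  if (yield && !intr_context ())
    thread_yield ();
}
```
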

@@ -240,10 +240,10 @@ lock_acquire (struct lock *lock)

| old line | old code | new line | new code |
|---|---|---|---|
| 240 | while (blocked_lock != NULL && blocked_lock->holder != NULL) | 240 | while (blocked_lock != NULL && blocked_lock->holder != NULL) |
| 241 | { | 241 | { |
| 242 | /* check for a possible priority donation */ | 242 | /* check for a possible priority donation */ |
| 243 | if (cur->priority > blocked_lock->priority) | 243 | if (thread_get_priority () > blocked_lock->priority) |
| 244 | { | 244 | { |
| 245 | /* the new priority of this lock is our own priority */ | 245 | /* the new priority of this lock is our own priority */ |
| 246 | blocked_lock->priority = cur->priority; | 246 | blocked_lock->priority = thread_get_priority (); |
| 247 | | 247 | |
| 248 | /* the lock priority changed so we need to sort the lock list of the lock holder */ | 248 | /* the lock priority changed so we need to sort the lock list of the lock holder */ |
| 249 | list_remove (&blocked_lock->elem); | 249 | list_remove (&blocked_lock->elem); |

@@ -264,7 +264,7 @@ lock_acquire (struct lock *lock)

| old line | old code | new line | new code |
|---|---|---|---|
| 264 | cur->blocked_lock = NULL; | 264 | cur->blocked_lock = NULL; |
| 265 | | 265 | |
| 266 | /* the priority of this lock is our own priority */ | 266 | /* the priority of this lock is our own priority */ |
| 267 | lock->priority = cur->priority; | 267 | lock->priority = thread_get_priority (); |
| 268 | | 268 | |
| 269 | /* save the locks we hold in descending order of their priority */ | 269 | /* save the locks we hold in descending order of their priority */ |
| 270 | list_insert_ordered (&lock->holder->locks, &lock->elem, | 270 | list_insert_ordered (&lock->holder->locks, &lock->elem, |
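
The `list_insert_ordered ()` call at line 270 keeps the holder's `locks` list sorted by lock priority; its comparator argument is cut off by the hunk boundary, so the real name is not visible here. A comparator with the signature `list_insert_ordered ()` expects would look roughly like this (the name `lock_priority_cmp_greater` is illustrative, not taken from the tree):

```c
/* Hypothetical comparator: order locks by descending priority so the
   highest-priority lock sits at the front of a thread's lock list. */
static bool
lock_priority_cmp_greater (const struct list_elem *a,
                           const struct list_elem *b, void *aux UNUSED)
{
  struct lock *la = list_entry (a, struct lock, elem);
  struct lock *lb = list_entry (b, struct lock, elem);
  return la->priority > lb->priority;
}
```
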

@@ -305,7 +305,7 @@ lock_release (struct lock *lock)

| old line | old code | new line | new code |
|---|---|---|---|
| 305 | ASSERT (lock_held_by_current_thread (lock)); | 305 | ASSERT (lock_held_by_current_thread (lock)); |
| 306 | | 306 | |
| 307 | lock->holder = NULL; | 307 | lock->holder = NULL; |
| 308 | //lock->priority = we don't care as the next lock_acquire()-call does | 308 | //lock->priority = we don't care since the next lock_acquire()-call does |
| 309 | sema_up (&lock->semaphore); | 309 | sema_up (&lock->semaphore); |
| 310 | | 310 | |
| 311 | /* we don't hold the lock any longer so remove it from the lock list */ | 311 | /* we don't hold the lock any longer so remove it from the lock list */ |

@@ -424,9 +424,9 @@ cond_signal (struct condition *cond, struct lock *lock UNUSED)

| old line | old code | new line | new code |
|---|---|---|---|
| 424 | this is the same as with sema_down()/sema_up() */ | 424 | this is the same as with sema_down()/sema_up() */ |
| 425 | if (!list_empty (&cond->waiters)) | 425 | if (!list_empty (&cond->waiters)) |
| 426 | { | 426 | { |
| 427 | list_sort (&cond->waiters, &semaphore_priority_cmp_greater, NULL); | 427 | list_sort (&cond->waiters, &semaphore_priority_cmp_greater, NULL); |
| 428 | sema_up (&list_entry (list_pop_front (&cond->waiters), | 428 | sema_up (&list_entry (list_pop_front (&cond->waiters), |
| 429 | struct semaphore_elem, elem)->semaphore); | 429 | struct semaphore_elem, elem)->semaphore); |
| 430 | } | 430 | } |
| 431 | } | 431 | } |
| 432 | | 432 | |
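
`cond_signal ()` sorts `cond->waiters`, which holds one `struct semaphore_elem` per waiting thread, so `semaphore_priority_cmp_greater ()` has to reach through each element to the thread blocked on its semaphore. That comparator lives elsewhere in synch.c and is not part of this diff; a plausible body, assuming the stock Pintos `struct semaphore_elem` layout, is:

```c
/* Plausible body (not shown in this diff): compare two condition waiters
   by the priority of the single thread blocked on each one's semaphore. */
static bool
semaphore_priority_cmp_greater (const struct list_elem *a,
                                const struct list_elem *b, void *aux UNUSED)
{
  struct semaphore_elem *sa = list_entry (a, struct semaphore_elem, elem);
  struct semaphore_elem *sb = list_entry (b, struct semaphore_elem, elem);
  /* Each waiter's semaphore has exactly one blocked thread at this point. */
  struct thread *ta = list_entry (list_front (&sa->semaphore.waiters),
                                  struct thread, elem);
  struct thread *tb = list_entry (list_front (&sb->semaphore.waiters),
                                  struct thread, elem);
  return ta->priority > tb->priority;
}
```
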

diff --git a/threads/thread.c b/threads/thread.c
index 61ab5d9..cf404b6 100644
--- a/threads/thread.c
+++ b/threads/thread.c

@@ -379,12 +379,26 @@ thread_set_priority (int new_priority)

| old line | old code | new line | new code |
|---|---|---|---|
| 379 | void | 379 | void |
| 380 | thread_other_set_priority (struct thread *t, int new_priority) | 380 | thread_other_set_priority (struct thread *t, int new_priority) |
| 381 | { | 381 | { |
| | | 382 | enum intr_level old_level; |
| | | 383 | bool yield = false; |
| | | 384 | |
| 382 | ASSERT (is_thread (t)); | 385 | ASSERT (is_thread (t)); |
| 383 | ASSERT (new_priority >= PRI_MIN && new_priority <= PRI_MAX); | 386 | ASSERT (new_priority >= PRI_MIN && new_priority <= PRI_MAX); |
| 384 | | 387 | |
| 385 | if (t->priority == new_priority) | 388 | if (t->priority == new_priority) |
| 386 | return; | 389 | return; |
| 387 | | 390 | |
| | | 391 | /* we need to disable interrupts here because after changing the thread's |
| | | 392 | priority and a possible timer interrupt which triggers thread_yield() we |
| | | 393 | could run into a couple of races. e.g. no proper sorted ready list, |
| | | 394 | corrupt ready list (wrong internal pointers) or the timer interrupt fires |
| | | 395 | after calling list_empty() but before list_front() while having only two |
| | | 396 | threads (one active, the other one ready). the scheduler then schedules the |
| | | 397 | other thread which runs and terminates, thus removing itself from all lists. |
| | | 398 | after that our thread gets scheduled again and reads from an empty list, |
| | | 399 | although it wasn't empty before. */ |
| | | 400 | old_level = intr_disable (); |
| | | 401 | |
| 388 | t->priority = new_priority; | 402 | t->priority = new_priority; |
| 389 | | 403 | |
| 390 | if (t->status == THREAD_READY) | 404 | if (t->status == THREAD_READY) |

@@ -398,8 +412,13 @@ thread_other_set_priority (struct thread *t, int new_priority)

| old line | old code | new line | new code |
|---|---|---|---|
| 398 | /* compare priority with the highest priority in the list */ | 412 | /* compare priority with the highest priority in the list */ |
| 399 | struct thread *t2 = list_entry (list_front (&ready_list), struct thread, elem); | 413 | struct thread *t2 = list_entry (list_front (&ready_list), struct thread, elem); |
| 400 | if (t2->priority > t->priority) | 414 | if (t2->priority > t->priority) |
| 401 | thread_yield (); | 415 | yield = true; |
| 402 | } | 416 | } |
| | | 417 | |
| | | 418 | intr_set_level (old_level); |
| | | 419 | |
| | | 420 | if (yield) |
| | | 421 | thread_yield (); |
| 403 | } | 422 | } |
| 404 | | 423 | |
| 405 | void | 424 | void |
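
The commit message talks about a race in `thread_set_priority ()`, yet only `thread_other_set_priority ()` shows up in the hunks. The likely reading is that setting one's own priority delegates to the race-safe path above; that delegation is not part of this diff, so the sketch below is an assumption about how the two functions relate.

```c
/* Assumed wrapper (not shown in this diff): changing our own priority
   reuses the interrupt-protected path in thread_other_set_priority(). */
void
thread_set_priority (int new_priority)
{
  thread_other_set_priority (thread_current (), new_priority);
}
```

Either way, the pattern the second hunk establishes is the one to copy: decide whether a yield is needed while interrupts are disabled, restore the old interrupt level, and only then call `thread_yield ()`.
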
