Diffstat (limited to 'threads')
-rw-r--r--  threads/synch.c   71
-rw-r--r--  threads/synch.h    2
-rw-r--r--  threads/thread.c  86
-rw-r--r--  threads/thread.h  10
4 files changed, 154 insertions, 15 deletions
diff --git a/threads/synch.c b/threads/synch.c
index 317c68a..1f15bb9 100644
--- a/threads/synch.c
+++ b/threads/synch.c
@@ -32,6 +32,9 @@
 #include "threads/interrupt.h"
 #include "threads/thread.h"
 
+static bool lock_list_priority_cmp_greater(const struct list_elem *a, const struct list_elem *b,
+                                           void *AUX UNUSED);
+
 /* Initializes semaphore SEMA to VALUE.  A semaphore is a
    nonnegative integer along with two atomic operators for
    manipulating it:
@@ -68,7 +71,8 @@ sema_down (struct semaphore *sema)
   old_level = intr_disable ();
   while (sema->value == 0)
     {
-      list_push_back (&sema->waiters, &thread_current ()->elem);
+      /* add thread to our ordered waiters list */
+      list_insert_ordered (&sema->waiters, &thread_current ()->elem, &priority_list_cmp_greater, NULL);
       thread_block ();
     }
   sema->value--;
@@ -181,6 +185,17 @@ lock_init (struct lock *lock)
   sema_init (&lock->semaphore, 1);
 }
 
+/* comparison function for our descending ordered lock list. */
+static bool
+lock_list_priority_cmp_greater(const struct list_elem *a, const struct list_elem *b,
+                               void *AUX UNUSED)
+{
+  struct lock *l1 = list_entry (a, struct lock, elem);
+  struct lock *l2 = list_entry (b, struct lock, elem);
+  /* descending order - put newer in front */
+  return (l1->priority >= l2->priority);
+}
+
 /* Acquires LOCK, sleeping until it becomes available if
    necessary.  The lock must not already be held by the current
    thread.
@@ -192,12 +207,41 @@ lock_init (struct lock *lock)
 void
 lock_acquire (struct lock *lock)
 {
+  struct thread *cur = thread_current ();
+
   ASSERT (lock != NULL);
   ASSERT (!intr_context ());
   ASSERT (!lock_held_by_current_thread (lock));
 
+  if (lock->holder != NULL)
+    {
+      //printf("[%d] acquire: lock=%p prio=%d\n", cur->tid, lock, cur->priority);
+
+      /* check for a possible priority donation */
+      if (cur->priority > lock->priority)
+        {
+          /* the new priority of this lock is our own priority */
+          lock->priority = cur->priority;
+
+          /* the lock priority changed so we need to sort the lock list of the lock holder */
+          list_remove (&lock->elem);
+          list_insert_ordered (&lock->holder->locks, &lock->elem,
+                               lock_list_priority_cmp_greater, NULL);
+
+          /* finally donate our priority */
+          thread_donate_priority (cur, lock->holder);
+        }
+    }
+
   sema_down (&lock->semaphore);
-  lock->holder = thread_current ();
+  lock->holder = cur;
+
+  /* the priority of this lock is our own priority */
+  lock->priority = cur->priority;
+
+  /* save the locks we hold in descending order of their priority */
+  list_insert_ordered (&lock->holder->locks, &lock->elem,
+                       lock_list_priority_cmp_greater, NULL);
 }
 
 /* Tries to acquires LOCK and returns true if successful or false
@@ -228,11 +272,31 @@ lock_try_acquire (struct lock *lock)
 void
 lock_release (struct lock *lock)
 {
+  struct thread *cur = thread_current ();
+
   ASSERT (lock != NULL);
   ASSERT (lock_held_by_current_thread (lock));
 
   lock->holder = NULL;
+  //lock->priority = we don't care as the next lock_acquire ()-call does
   sema_up (&lock->semaphore);
+
+  /* we don't hold the lock any longer so remove it from the lock list */
+  list_remove (&lock->elem);
+
+  if (list_empty (&cur->locks))
+    {
+      //printf("[%d] restoring prio from %d to %d\n", cur->tid, cur->priority, cur->saved_priority);
+      /* we don't hold another lock so restore our base priority */
+      thread_donation_pass_back ();
+    }
+  else
+    {
+      /* we still hold at least one lock so change our priority to the priority of the lock */
+      struct lock *lock2 = list_entry (list_front (&cur->locks), struct lock, elem);
+      /* (forced) priority change */
+      thread_other_set_priority (cur, lock2->priority);
+    }
 }
 
 /* Returns true if the current thread holds LOCK, false
@@ -295,7 +359,8 @@ cond_wait (struct condition *cond, struct lock *lock)
   ASSERT (lock_held_by_current_thread (lock));
 
   sema_init (&waiter.semaphore, 0);
-  list_push_back (&cond->waiters, &waiter.elem);
+  /* add thread to our ordered waiters list */
+  list_insert_ordered (&cond->waiters, &waiter.elem, &priority_list_cmp_greater, NULL);
   lock_release (lock);
   sema_down (&waiter.semaphore);
   lock_acquire (lock);
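
The lock_acquire()/lock_release() changes above add basic single-level priority donation. A minimal usage sketch of the inversion scenario they address, assuming the standard Pintos lock/thread API; the names demo_lock, high_prio_func and donation_demo are illustrative only and are not part of this patch:

#include "threads/thread.h"
#include "threads/synch.h"

static struct lock demo_lock;            /* hypothetical lock, for illustration only */

static void
high_prio_func (void *aux UNUSED)
{
  /* Blocks here: lock_acquire() sees lock->holder != NULL, raises
     demo_lock's priority and donates this thread's priority to the holder. */
  lock_acquire (&demo_lock);
  lock_release (&demo_lock);
}

void
donation_demo (void)
{
  lock_init (&demo_lock);
  lock_acquire (&demo_lock);             /* this thread plays the low-priority holder */
  thread_create ("high", PRI_DEFAULT + 10, high_prio_func, NULL);
  /* "high" preempts us, blocks on demo_lock and donates PRI_DEFAULT + 10,
     so medium-priority threads can no longer starve us while we hold the lock. */
  lock_release (&demo_lock);             /* thread_donation_pass_back() restores the base priority */
}

On release, the holder either restores its base priority via thread_donation_pass_back() or, if it still holds other locks, falls back to the highest remaining lock->priority via thread_other_set_priority().
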
diff --git a/threads/synch.h b/threads/synch.h
index a19e88b..cdf415c 100644
--- a/threads/synch.h
+++ b/threads/synch.h
@@ -22,6 +22,8 @@ struct lock
   {
     struct thread *holder;      /* Thread holding lock (for debugging). */
     struct semaphore semaphore; /* Binary semaphore controlling access. */
+    int priority;               /* the priority of the thread with the highest priority currently acquiring the lock */
+    struct list_elem elem;      /* list element for (struct thread)->locks */
   };
 
 void lock_init (struct lock *);
diff --git a/threads/thread.c b/threads/thread.c
index 3fefbd8..17d21bd 100644
--- a/threads/thread.c
+++ b/threads/thread.c
@@ -71,9 +71,9 @@ static void schedule (void);
 void thread_schedule_tail (struct thread *prev);
 static tid_t allocate_tid (void);
 
-//TODO: should take donated priority into account
-static bool
-ready_list_cmp_less(const struct list_elem *a, const struct list_elem *b,
+/* comparison function for our descending ordered ready list. */
+bool
+priority_list_cmp_greater(const struct list_elem *a, const struct list_elem *b,
                     void *AUX UNUSED)
 {
   struct thread *t1 = list_entry (a, struct thread, elem);
@@ -222,7 +222,10 @@ thread_create (const char *name, int priority,
 
   /* Add to run queue. */
   thread_unblock (t);
-  thread_yield ();
+
+  /* and yield the cpu in case we have a higher priority */
+  if (t->priority > thread_current ()->priority)
+    thread_yield ();
 
   return tid;
 }
@@ -261,7 +264,7 @@ thread_unblock (struct thread *t)
   old_level = intr_disable ();
   ASSERT (t->status == THREAD_BLOCKED);
   /* add thread to our ordered ready_list */
-  list_insert_ordered (&ready_list, &t->elem, &ready_list_cmp_less, NULL);
+  list_insert_ordered (&ready_list, &t->elem, &priority_list_cmp_greater, NULL);
   t->status = THREAD_READY;
   intr_set_level (old_level);
 }
@@ -332,7 +335,7 @@ thread_yield (void)
 
   old_level = intr_disable ();
   if (cur != idle_thread)
-    list_insert_ordered (&ready_list, &cur->elem, &ready_list_cmp_less, NULL);
+    list_insert_ordered (&ready_list, &cur->elem, &priority_list_cmp_greater, NULL);
   cur->status = THREAD_READY;
   schedule ();
   intr_set_level (old_level);
@@ -359,14 +362,71 @@ thread_foreach (thread_action_func *func, void *aux)
 void
 thread_set_priority (int new_priority)
 {
-  thread_current ()->priority = new_priority;
+  struct thread *cur = thread_current ();
+  if (cur->priority == new_priority)
+    return;
+
+  if (cur->is_donated)
+    {
+      cur->saved_priority = new_priority;
+      /* make sure we don't lower a donated priority */
+      if (new_priority < cur->priority)
+        return;
+    }
+  thread_other_set_priority (cur, new_priority);
+}
 
-  /* compare priority with the highest priority in the list */
-  if (!list_empty (&ready_list))
+void
+thread_other_set_priority (struct thread *t, int new_priority)
+{
+  ASSERT (is_thread (t));
+
+  if (t->priority == new_priority)
+    return;
+
+  t->priority = new_priority;
+
+  if (t->status == THREAD_READY)
+    {
+      /* sort our ordered list */
+      list_remove (&t->elem);
+      list_insert_ordered (&ready_list, &t->elem, &priority_list_cmp_greater, NULL);
+    }
+  else if (t->status == THREAD_RUNNING && !list_empty (&ready_list))
+    {
+      /* compare priority with the highest priority in the list */
+      struct thread *t2 = list_entry (list_front (&ready_list), struct thread, elem);
+      if (t2->priority > t->priority)
+        thread_yield ();
+    }
+}
+
+void
+thread_donate_priority (struct thread *donator, struct thread *donee)
+{
+  ASSERT (is_thread (donator));
+  ASSERT (is_thread (donee));
+
+  /* make sure we don't lower a donated priority */
+  if (donator->priority < donee->priority)
+    return;
+
+  /* store our base priority */
+  if (!donee->is_donated)
+    {
+      donee->saved_priority = donee->priority;
+      donee->is_donated = true;
+    }
+  thread_other_set_priority (donee, donator->priority);
+}
+
+void
+thread_donation_pass_back (void)
+{
+  if (thread_current ()->is_donated)
     {
-      struct thread *t = list_entry (list_front (&ready_list), struct thread, elem);
-      if (t->priority > thread_current ()->priority)
-        thread_yield();
+      thread_current ()->is_donated = false;
+      thread_set_priority (thread_current ()->saved_priority);
     }
 }
 
@@ -497,6 +557,8 @@ init_thread (struct thread *t, const char *name, int priority)
 #ifdef USERPROG
   list_init(&t->children);
 #endif
+  t->is_donated = false;
+  list_init (&t->locks);
 }
 
 /* Allocates a SIZE-byte frame at the top of thread T's stack and
diff --git a/threads/thread.h b/threads/thread.h
index 4ba5ef2..a6ef25d 100644
--- a/threads/thread.h
+++ b/threads/thread.h
@@ -107,6 +107,10 @@ struct thread
     unsigned magic;                     /* Detects stack overflow. */
 
     int64_t wakeup_tick;                /* absolute tick when to wake up the thread */
+
+    int is_donated;                     /* flag whether the thread's priority has been donated by another thread */
+    int saved_priority;                 /* saved base priority in case of priority donation */
+    struct list locks;                  /* list of locks we currently hold */
   };
 
 /* If false (default), use round-robin scheduler.
@@ -114,6 +118,9 @@ struct thread
    Controlled by kernel command-line option "-o mlfqs". */
 extern bool thread_mlfqs;
 
+bool priority_list_cmp_greater (const struct list_elem *a, const struct list_elem *b,
+                                void *aux);
+
 void thread_init (void);
 void thread_start (void);
 
@@ -139,6 +146,9 @@ void thread_foreach (thread_action_func *, void *);
 
 int thread_get_priority (void);
 void thread_set_priority (int);
+void thread_other_set_priority (struct thread *t, int new_priority);
+void thread_donate_priority (struct thread *donator, struct thread *donee);
+void thread_donation_pass_back (void);
 
 int thread_get_nice (void);
 void thread_set_nice (int);
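
The full body of priority_list_cmp_greater() is cut off by the hunk boundary in thread.c above; a descending list_less_func consistent with the declared signature could look like the sketch below. This is an assumption about the elided lines, not necessarily the committed code:

bool
priority_list_cmp_greater (const struct list_elem *a, const struct list_elem *b,
                           void *AUX UNUSED)
{
  /* sketch only - the actual comparison expression is not visible in the hunk above */
  struct thread *t1 = list_entry (a, struct thread, elem);
  struct thread *t2 = list_entry (b, struct thread, elem);
  /* strictly descending, so list_insert_ordered() keeps the
     highest-priority thread at the front of the list */
  return t1->priority > t2->priority;
}
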