From 158ea87fba909161a084c29feff29a168725fe4e Mon Sep 17 00:00:00 2001
From: manuel
Date: Mon, 7 May 2012 18:01:20 +0200
Subject: first priority donation implementation

---
 threads/synch.c  | 71 ++++++++++++++++++++++++++++++++++++++++++++--
 threads/synch.h  |  2 ++
 threads/thread.c | 86 ++++++++++++++++++++++++++++++++++++++++++++++++--------
 threads/thread.h | 10 +++++++
 4 files changed, 154 insertions(+), 15 deletions(-)

(limited to 'threads')

diff --git a/threads/synch.c b/threads/synch.c
index 317c68a..1f15bb9 100644
--- a/threads/synch.c
+++ b/threads/synch.c
@@ -32,6 +32,9 @@
 #include "threads/interrupt.h"
 #include "threads/thread.h"
 
+static bool lock_list_priority_cmp_greater(const struct list_elem *a, const struct list_elem *b,
+		void *AUX UNUSED);
+
 /* Initializes semaphore SEMA to VALUE.  A semaphore is a
    nonnegative integer along with two atomic operators for
    manipulating it:
@@ -68,7 +71,8 @@ sema_down (struct semaphore *sema)
   old_level = intr_disable ();
   while (sema->value == 0) 
     {
-      list_push_back (&sema->waiters, &thread_current ()->elem);
+      /* add the thread to the priority-ordered waiters list */
+      list_insert_ordered (&sema->waiters, &thread_current ()->elem, &priority_list_cmp_greater, NULL);
       thread_block ();
     }
   sema->value--;
@@ -181,6 +185,17 @@ lock_init (struct lock *lock)
   sema_init (&lock->semaphore, 1);
 }
 
+/* Comparison function for a thread's lock list, ordered by descending priority. */
+static bool
+lock_list_priority_cmp_greater(const struct list_elem *a, const struct list_elem *b,
+		void *AUX UNUSED)
+{
+  struct lock *l1 = list_entry (a, struct lock, elem);
+  struct lock *l2 = list_entry (b, struct lock, elem);
+  /* descending order: insert newer elements in front of equal ones */
+  return (l1->priority >= l2->priority);
+}
+
 /* Acquires LOCK, sleeping until it becomes available if
    necessary.  The lock must not already be held by the current
    thread.
@@ -192,12 +207,41 @@ lock_init (struct lock *lock)
 void
 lock_acquire (struct lock *lock)
 {
+  struct thread *cur = thread_current ();
+
   ASSERT (lock != NULL);
   ASSERT (!intr_context ());
   ASSERT (!lock_held_by_current_thread (lock));
 
+  if (lock->holder != NULL)
+    {
+      //printf("[%d] acquire: lock=%p prio=%d\n", cur->tid, lock, cur->priority);
+
+      /* check for a possible priority donation */
+      if (cur->priority > lock->priority)
+        {
+          /* the new priority of this lock is our own priority */
+          lock->priority = cur->priority;
+
+          /* the lock priority changed, so re-sort the lock holder's lock list */
+          list_remove (&lock->elem);
+          list_insert_ordered (&lock->holder->locks, &lock->elem,
+                               lock_list_priority_cmp_greater, NULL);
+
+          /* finally donate our priority */
+          thread_donate_priority (cur, lock->holder);
+        }
+    }
+
   sema_down (&lock->semaphore);
-  lock->holder = thread_current ();
+  lock->holder = cur;
+
+  /* the priority of this lock is our own priority */
+  lock->priority = cur->priority;
+
+  /* save the locks we hold in descending order of their priority */
+  list_insert_ordered (&lock->holder->locks, &lock->elem,
+                       lock_list_priority_cmp_greater, NULL);
 }
 
 /* Tries to acquires LOCK and returns true if successful or false
@@ -228,11 +272,31 @@ lock_try_acquire (struct lock *lock)
 void
 lock_release (struct lock *lock) 
 {
+  struct thread *cur = thread_current ();
+
   ASSERT (lock != NULL);
   ASSERT (lock_held_by_current_thread (lock));
 
   lock->holder = NULL;
+  // lock->priority stays stale here; the next lock_acquire () call resets it
   sema_up (&lock->semaphore);
+
+  /* we don't hold the lock any longer, so remove it from our lock list */
+  list_remove (&lock->elem);
+
+  if (list_empty (&cur->locks))
+    {
+      //printf("[%d] restoring prio from %d to %d\n", cur->tid, cur->priority, cur->saved_priority);
+      /* we don't hold any other locks, so restore our base priority */
+      thread_donation_pass_back ();
+    }
+  else
+    {
+      /* we still hold at least one lock, so take the priority of our highest-priority lock */
+      struct lock *lock2 = list_entry (list_front (&cur->locks), struct lock, elem);
+      /* (forced) priority change */
+      thread_other_set_priority (cur, lock2->priority);
+    }
 }
 
 /* Returns true if the current thread holds LOCK, false
@@ -295,7 +359,8 @@ cond_wait (struct condition *cond, struct lock *lock)
   ASSERT (lock_held_by_current_thread (lock));
 
   sema_init (&waiter.semaphore, 0);
-  list_push_back (&cond->waiters, &waiter.elem);
+  /* add the thread to the priority-ordered waiters list */
+  list_insert_ordered (&cond->waiters, &waiter.elem, &priority_list_cmp_greater, NULL);
   lock_release (lock);
   sema_down (&waiter.semaphore);
   lock_acquire (lock);
diff --git a/threads/synch.h b/threads/synch.h
index a19e88b..cdf415c 100644
--- a/threads/synch.h
+++ b/threads/synch.h
@@ -22,6 +22,8 @@ struct lock 
   {
     struct thread *holder;      /* Thread holding lock (for debugging). */
     struct semaphore semaphore; /* Binary semaphore controlling access. */
+    int priority;               /* highest priority among the threads holding or waiting for the lock */
+    struct list_elem elem;      /* list element for (struct thread)->locks */
   };
 
 void lock_init (struct lock *);
diff --git a/threads/thread.c b/threads/thread.c
index 3fefbd8..17d21bd 100644
--- a/threads/thread.c
+++ b/threads/thread.c
@@ -71,9 +71,9 @@ static void schedule (void);
 void thread_schedule_tail (struct thread *prev);
 static tid_t allocate_tid (void);
 
-//TODO: should take donated priority into account
-static bool
-ready_list_cmp_less(const struct list_elem *a, const struct list_elem *b,
+/* comparison function for thread lists ordered by descending priority. */
+bool
+priority_list_cmp_greater(const struct list_elem *a, const struct list_elem *b,
 		void *AUX UNUSED)
 {
   struct thread *t1 = list_entry (a, struct thread, elem);
@@ -222,7 +222,10 @@ thread_create (const char *name, int priority,
 
   /* Add to run queue. */
   thread_unblock (t);
-  thread_yield ();
+
+  /* and yield the CPU if the new thread has a higher priority than us */
+  if (t->priority > thread_current ()->priority)
+    thread_yield ();
 
   return tid;
 }
@@ -261,7 +264,7 @@ thread_unblock (struct thread *t)
   old_level = intr_disable ();
   ASSERT (t->status == THREAD_BLOCKED);
   /* add thread to our ordered ready_list */
-  list_insert_ordered (&ready_list, &t->elem, &ready_list_cmp_less, NULL);
+  list_insert_ordered (&ready_list, &t->elem, &priority_list_cmp_greater, NULL);
   t->status = THREAD_READY;
   intr_set_level (old_level);
 }
@@ -332,7 +335,7 @@ thread_yield (void)
   old_level = intr_disable ();
   if (cur != idle_thread) 
-    list_insert_ordered (&ready_list, &cur->elem, &ready_list_cmp_less, NULL);
+    list_insert_ordered (&ready_list, &cur->elem, &priority_list_cmp_greater, NULL);
   cur->status = THREAD_READY;
   schedule ();
   intr_set_level (old_level);
 }
@@ -359,14 +362,71 @@ thread_foreach (thread_action_func *func, void *aux)
 void
 thread_set_priority (int new_priority) 
 {
-  thread_current ()->priority = new_priority;
+  struct thread *cur = thread_current ();
+  if (cur->priority == new_priority)
+    return;
+
+  if (cur->is_donated)
+    {
+      cur->saved_priority = new_priority;
+      /* make sure we don't lower a donated priority */
+      if (new_priority < cur->priority)
+        return;
+    }
+  thread_other_set_priority (cur, new_priority);
+}
 
-  /* compare priority with the highest priority in the list */
-  if (!list_empty (&ready_list))
+void
+thread_other_set_priority (struct thread *t, int new_priority)
+{
+  ASSERT (is_thread (t));
+
+  if (t->priority == new_priority)
+    return;
+
+  t->priority = new_priority;
+
+  if (t->status == THREAD_READY)
+    {
+      /* re-insert to keep the ready list sorted */
+      list_remove (&t->elem);
+      list_insert_ordered (&ready_list, &t->elem, &priority_list_cmp_greater, NULL);
+    }
+  else if (t->status == THREAD_RUNNING && !list_empty (&ready_list))
+    {
+      /* compare priority with the highest priority in the list */
+      struct thread *t2 = list_entry (list_front (&ready_list), struct thread, elem);
+      if (t2->priority > t->priority)
+        thread_yield ();
+    }
+}
+
+void
+thread_donate_priority (struct thread *donator, struct thread *donee)
+{
+  ASSERT (is_thread (donator));
+  ASSERT (is_thread (donee));
+
+  /* make sure we don't lower a donated priority */
+  if (donator->priority < donee->priority)
+    return;
+
+  /* save the donee's base priority */
+  if (!donee->is_donated)
+    {
+      donee->saved_priority = donee->priority;
+      donee->is_donated = true;
+    }
+  thread_other_set_priority (donee, donator->priority);
+}
+
+void
+thread_donation_pass_back (void)
+{
+  if (thread_current ()->is_donated)
     {
-      struct thread *t = list_entry (list_front (&ready_list), struct thread, elem);
-      if (t->priority > thread_current ()->priority)
-        thread_yield();
+      thread_current ()->is_donated = false;
+      thread_set_priority (thread_current ()->saved_priority);
     }
 }
 
@@ -497,6 +557,8 @@ init_thread (struct thread *t, const char *name, int priority)
 #ifdef USERPROG
   list_init(&t->children);
 #endif
+  t->is_donated = false;
+  list_init (&t->locks);
 }
 
 /* Allocates a SIZE-byte frame at the top of thread T's stack and
diff --git a/threads/thread.h b/threads/thread.h
index 4ba5ef2..a6ef25d 100644
--- a/threads/thread.h
+++ b/threads/thread.h
@@ -107,6 +107,10 @@ struct thread
     unsigned magic;                     /* Detects stack overflow. */
 
     int64_t wakeup_tick;                /* absolute tick when to wake up the thread */
+
+    int is_donated;                     /* set when the thread's priority has been donated by another thread */
+    int saved_priority;                 /* saved base priority in case of priority donation */
+    struct list locks;                  /* list of locks this thread currently holds */
   };
 
 /* If false (default), use round-robin scheduler.
@@ -114,6 +118,9 @@ struct thread
    Controlled by kernel command-line option "-o mlfqs". */
 extern bool thread_mlfqs;
 
+bool priority_list_cmp_greater (const struct list_elem *a, const struct list_elem *b,
+		void *aux);
+
 void thread_init (void);
 void thread_start (void);
 
@@ -139,6 +146,9 @@ void thread_foreach (thread_action_func *, void *);
 
 int thread_get_priority (void);
 void thread_set_priority (int);
+void thread_other_set_priority (struct thread *t, int new_priority);
+void thread_donate_priority (struct thread *donator, struct thread *donee);
+void thread_donation_pass_back (void);
 
 int thread_get_nice (void);
 void thread_set_nice (int);
-- 
cgit v1.2.3
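
The following sketch is not part of the commit; it is a minimal, self-contained illustration of the descending-order insertion pattern the patch applies to the semaphore waiters list, the ready list and the per-thread lock list. It assumes the Pintos kernel list API (<list.h>); struct demo_elem, demo_cmp_greater and demo_ordered_insert are made-up names for illustration only.

#include <debug.h>
#include <list.h>
#include <stdio.h>

/* Hypothetical element type; the patch uses the same pattern with
   struct thread (ready list, semaphore waiters) and struct lock
   (per-thread lock list). */
struct demo_elem
  {
    int priority;
    struct list_elem elem;
  };

/* "Greater" comparator: A goes in front of B when A's priority is at
   least B's, so the list stays in descending order and list_front ()
   always yields the highest-priority element. */
static bool
demo_cmp_greater (const struct list_elem *a, const struct list_elem *b,
                  void *aux UNUSED)
{
  struct demo_elem *x = list_entry (a, struct demo_elem, elem);
  struct demo_elem *y = list_entry (b, struct demo_elem, elem);
  return x->priority >= y->priority;
}

/* Hypothetical driver: inserts three elements out of order and prints
   them front to back.  Expected output: 45 31 7. */
void
demo_ordered_insert (void)
{
  static struct demo_elem items[3];
  static const int prios[3] = {31, 45, 7};
  struct list queue;
  struct list_elem *e;
  int i;

  list_init (&queue);
  for (i = 0; i < 3; i++)
    {
      items[i].priority = prios[i];
      list_insert_ordered (&queue, &items[i].elem, demo_cmp_greater, NULL);
    }

  for (e = list_begin (&queue); e != list_end (&queue); e = list_next (e))
    printf ("%d ", list_entry (e, struct demo_elem, elem)->priority);
  printf ("\n");
}

Because the comparison is non-strict (>=), an element is inserted in front of existing elements of equal priority, which matches the "put newer in front" intent of the patch's own comparators.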
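A second sketch, also not part of the commit, shows the end-to-end behaviour the patch aims for, in the style of the Pintos priority-donation kernel tests. It assumes the thread and lock API from threads/thread.h and threads/synch.h as modified by this patch; donation_demo, high_prio_acquirer and demo_lock are illustrative names, and the printed values assume a PRI_DEFAULT base priority.

#include <debug.h>
#include <stdio.h>
#include "threads/synch.h"
#include "threads/thread.h"

static struct lock demo_lock;     /* lock shared by both threads */

/* Body of the higher-priority thread: it blocks on demo_lock, which the
   creator still holds, so lock_acquire () donates its priority to the
   creator via thread_donate_priority (). */
static void
high_prio_acquirer (void *aux UNUSED)
{
  lock_acquire (&demo_lock);
  printf ("high: got the lock\n");
  lock_release (&demo_lock);
}

/* Hypothetical driver, e.g. called from a kernel test running at
   PRI_DEFAULT. */
void
donation_demo (void)
{
  lock_init (&demo_lock);
  lock_acquire (&demo_lock);

  /* The new thread has a higher priority, so thread_create () switches to
     it right away; it blocks on demo_lock and donates its priority. */
  thread_create ("high", PRI_DEFAULT + 10, high_prio_acquirer, NULL);

  /* With donation in place this should report PRI_DEFAULT + 10. */
  printf ("main: priority is now %d\n", thread_get_priority ());

  /* Releasing the lock removes it from our lock list; since it was the
     only one, thread_donation_pass_back () restores the base priority
     and the woken high-priority thread runs. */
  lock_release (&demo_lock);
  printf ("main: priority back to %d\n", thread_get_priority ());
}

Note that this exercises only single-level donation: thread_donate_priority () raises the direct lock holder, and this first implementation does not yet propagate donations along a chain of locks.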