diff -urpN -X /home/fletch/.diff.exclude 713-fs_aio_3_write/arch/i386/kernel/i386_ksyms.c 714-fs_aio_4_down_wq/arch/i386/kernel/i386_ksyms.c
--- 713-fs_aio_3_write/arch/i386/kernel/i386_ksyms.c	Fri May 30 19:24:56 2003
+++ 714-fs_aio_4_down_wq/arch/i386/kernel/i386_ksyms.c	Fri May 30 22:04:52 2003
@@ -98,7 +98,7 @@ EXPORT_SYMBOL(apm_info);
 EXPORT_SYMBOL(__io_virt_debug);
 #endif
 
-EXPORT_SYMBOL_NOVERS(__down_failed);
+EXPORT_SYMBOL_NOVERS(__down_failed_wq);
 EXPORT_SYMBOL_NOVERS(__down_failed_interruptible);
 EXPORT_SYMBOL_NOVERS(__down_failed_trylock);
 EXPORT_SYMBOL_NOVERS(__up_wakeup);
diff -urpN -X /home/fletch/.diff.exclude 713-fs_aio_3_write/arch/i386/kernel/semaphore.c 714-fs_aio_4_down_wq/arch/i386/kernel/semaphore.c
--- 713-fs_aio_3_write/arch/i386/kernel/semaphore.c	Sun Dec  1 10:00:12 2002
+++ 714-fs_aio_4_down_wq/arch/i386/kernel/semaphore.c	Fri May 30 22:04:52 2003
@@ -15,6 +15,7 @@
 #include <linux/config.h>
 #include <linux/sched.h>
 #include <linux/err.h>
+#include <linux/errno.h>
 #include <asm/semaphore.h>
 
 /*
@@ -53,15 +54,20 @@ void __up(struct semaphore *sem)
 	wake_up(&sem->wait);
 }
 
-void __down(struct semaphore * sem)
+int __down_wq(struct semaphore * sem, wait_queue_t *wait)
 {
 	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
+	DECLARE_WAITQUEUE(local_wait, tsk);
 	unsigned long flags;
 
-	tsk->state = TASK_UNINTERRUPTIBLE;
+	if (is_sync_wait(wait))
+		tsk->state = TASK_UNINTERRUPTIBLE;
+	if (!wait) {
+		wait = &local_wait;
+	}
+
 	spin_lock_irqsave(&sem->wait.lock, flags);
-	add_wait_queue_exclusive_locked(&sem->wait, &wait);
+	add_wait_queue_exclusive_locked(&sem->wait, wait);
 
 	sem->sleepers++;
 	for (;;) {
@@ -79,17 +85,23 @@ void __down(struct semaphore * sem)
 		sem->sleepers = 1;	/* us - see -1 above */
 		spin_unlock_irqrestore(&sem->wait.lock, flags);
 
+		if (!is_sync_wait(wait))
+			return -EIOCBRETRY;
+
 		schedule();
 
 		spin_lock_irqsave(&sem->wait.lock, flags);
 		tsk->state = TASK_UNINTERRUPTIBLE;
 	}
-	remove_wait_queue_locked(&sem->wait, &wait);
+	if (is_sync_wait(wait) || !list_empty(&wait->task_list))
+		remove_wait_queue_locked(&sem->wait, wait);
 	wake_up_locked(&sem->wait);
 	spin_unlock_irqrestore(&sem->wait.lock, flags);
 	tsk->state = TASK_RUNNING;
+	return 0;
 }
 
+
 int __down_interruptible(struct semaphore * sem)
 {
 	int retval = 0;
@@ -189,19 +201,17 @@ int __down_trylock(struct semaphore * se
 asm(
 ".text\n"
 ".align 4\n"
-".globl __down_failed\n"
-"__down_failed:\n\t"
+".globl __down_failed_wq\n"
+"__down_failed_wq:\n\t"
 #if defined(CONFIG_FRAME_POINTER)
 	"pushl %ebp\n\t"
 	"movl  %esp,%ebp\n\t"
 #endif
-	"pushl %eax\n\t"
 	"pushl %edx\n\t"
 	"pushl %ecx\n\t"
-	"call __down\n\t"
+	"call __down_wq\n\t"
 	"popl %ecx\n\t"
 	"popl %edx\n\t"
-	"popl %eax\n\t"
 #if defined(CONFIG_FRAME_POINTER)
 	"movl %ebp,%esp\n\t"
 	"popl %ebp\n\t"
diff -urpN -X /home/fletch/.diff.exclude 713-fs_aio_3_write/include/asm-i386/semaphore.h 714-fs_aio_4_down_wq/include/asm-i386/semaphore.h
--- 713-fs_aio_3_write/include/asm-i386/semaphore.h	Wed Mar  5 07:37:06 2003
+++ 714-fs_aio_4_down_wq/include/asm-i386/semaphore.h	Fri May 30 22:04:52 2003
@@ -96,39 +96,48 @@ static inline void init_MUTEX_LOCKED (st
 	sema_init(sem, 0);
 }
 
-asmlinkage void __down_failed(void /* special register calling convention */);
+asmlinkage int __down_failed_wq(void /* special register calling convention */);
 asmlinkage int  __down_failed_interruptible(void  /* params in registers */);
 asmlinkage int  __down_failed_trylock(void  /* params in registers */);
 asmlinkage void __up_wakeup(void /* special register calling convention */);
 
-asmlinkage void __down(struct semaphore * sem);
+asmlinkage int __down_wq(struct semaphore * sem, wait_queue_t *wait);
 asmlinkage int  __down_interruptible(struct semaphore * sem);
 asmlinkage int  __down_trylock(struct semaphore * sem);
 asmlinkage void __up(struct semaphore * sem);
 
 /*
  * This is ugly, but we want the default case to fall through.
- * "__down_failed" is a special asm handler that calls the C
+ * "__down_failed_wq" is a special asm handler that calls the C
  * routine that actually waits. See arch/i386/kernel/semaphore.c
  */
-static inline void down(struct semaphore * sem)
+static inline int down_wq(struct semaphore * sem, wait_queue_t *wait)
 {
+	int result;
+
 #ifdef WAITQUEUE_DEBUG
 	CHECK_MAGIC(sem->__magic);
 #endif
 	might_sleep();
 	__asm__ __volatile__(
 		"# atomic down operation\n\t"
-		LOCK "decl %0\n\t"     /* --sem->count */
-		"js 2f\n"
+		LOCK "decl %1\n\t"     /* --sem->count */
+		"js 2f\n\t"
+		"xorl %0,%0\n"
 		"1:\n"
 		LOCK_SECTION_START("")
-		"2:\tcall __down_failed\n\t"
+		"2:\tcall __down_failed_wq\n\t"
 		"jmp 1b\n"
 		LOCK_SECTION_END
-		:"=m" (sem->count)
-		:"c" (sem)
+		:"=a" (result), "=m" (sem->count)
+		:"c" (sem), "d" (wait)
 		:"memory");
+	return result;
+}
+
+static inline void down(struct semaphore * sem)
+{
+	down_wq(sem, NULL);
 }
 
 /*