Commit 2f130980 authored by Mark Haverkamp's avatar Mark Haverkamp Committed by James Bottomley

[SCSI] aacraid: AIF preallocation (update)

Received from Mark Salyzyn of Adaptec.

Aif pre-allocation is used to pull the kmalloc outside of the locks.

Applies to the scsi-misc-2.6 git tree.
Signed-off-by: default avatarMark Haverkamp <markh@osdl.org>
Signed-off-by: default avatarJames Bottomley <James.Bottomley@SteelEye.com>
parent 7a8cf29d
...@@ -805,7 +805,6 @@ int aac_command_thread(struct aac_dev * dev) ...@@ -805,7 +805,6 @@ int aac_command_thread(struct aac_dev * dev)
{ {
struct hw_fib *hw_fib, *hw_newfib; struct hw_fib *hw_fib, *hw_newfib;
struct fib *fib, *newfib; struct fib *fib, *newfib;
struct aac_queue_block *queues = dev->queues;
struct aac_fib_context *fibctx; struct aac_fib_context *fibctx;
unsigned long flags; unsigned long flags;
DECLARE_WAITQUEUE(wait, current); DECLARE_WAITQUEUE(wait, current);
...@@ -825,21 +824,22 @@ int aac_command_thread(struct aac_dev * dev) ...@@ -825,21 +824,22 @@ int aac_command_thread(struct aac_dev * dev)
* Let the DPC know it has a place to send the AIF's to. * Let the DPC know it has a place to send the AIF's to.
*/ */
dev->aif_thread = 1; dev->aif_thread = 1;
add_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait); add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
set_current_state(TASK_INTERRUPTIBLE); set_current_state(TASK_INTERRUPTIBLE);
dprintk ((KERN_INFO "aac_command_thread start\n"));
while(1) while(1)
{ {
spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags); spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
while(!list_empty(&(queues->queue[HostNormCmdQueue].cmdq))) { while(!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
struct list_head *entry; struct list_head *entry;
struct aac_aifcmd * aifcmd; struct aac_aifcmd * aifcmd;
set_current_state(TASK_RUNNING); set_current_state(TASK_RUNNING);
entry = queues->queue[HostNormCmdQueue].cmdq.next; entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
list_del(entry); list_del(entry);
spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags); spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
fib = list_entry(entry, struct fib, fiblink); fib = list_entry(entry, struct fib, fiblink);
/* /*
* We will process the FIB here or pass it to a * We will process the FIB here or pass it to a
...@@ -869,9 +869,54 @@ int aac_command_thread(struct aac_dev * dev) ...@@ -869,9 +869,54 @@ int aac_command_thread(struct aac_dev * dev)
u32 time_now, time_last; u32 time_now, time_last;
unsigned long flagv; unsigned long flagv;
unsigned num;
struct hw_fib ** hw_fib_pool, ** hw_fib_p;
struct fib ** fib_pool, ** fib_p;
time_now = jiffies/HZ; time_now = jiffies/HZ;
/*
* Warning: no sleep allowed while
* holding spinlock. We take the estimate
* and pre-allocate a set of fibs outside the
* lock.
*/
num = le32_to_cpu(dev->init->AdapterFibsSize)
/ sizeof(struct hw_fib); /* some extra */
spin_lock_irqsave(&dev->fib_lock, flagv);
entry = dev->fib_list.next;
while (entry != &dev->fib_list) {
entry = entry->next;
++num;
}
spin_unlock_irqrestore(&dev->fib_lock, flagv);
hw_fib_pool = NULL;
fib_pool = NULL;
if (num
&& ((hw_fib_pool = kmalloc(sizeof(struct hw_fib *) * num, GFP_KERNEL)))
&& ((fib_pool = kmalloc(sizeof(struct fib *) * num, GFP_KERNEL)))) {
hw_fib_p = hw_fib_pool;
fib_p = fib_pool;
while (hw_fib_p < &hw_fib_pool[num]) {
if (!(*(hw_fib_p++) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL))) {
--hw_fib_p;
break;
}
if (!(*(fib_p++) = kmalloc(sizeof(struct fib), GFP_KERNEL))) {
kfree(*(--hw_fib_p));
break;
}
}
if ((num = hw_fib_p - hw_fib_pool) == 0) {
kfree(fib_pool);
fib_pool = NULL;
kfree(hw_fib_pool);
hw_fib_pool = NULL;
}
} else if (hw_fib_pool) {
kfree(hw_fib_pool);
hw_fib_pool = NULL;
}
spin_lock_irqsave(&dev->fib_lock, flagv); spin_lock_irqsave(&dev->fib_lock, flagv);
entry = dev->fib_list.next; entry = dev->fib_list.next;
/* /*
...@@ -880,6 +925,8 @@ int aac_command_thread(struct aac_dev * dev) ...@@ -880,6 +925,8 @@ int aac_command_thread(struct aac_dev * dev)
* fib, and then set the event to wake up the * fib, and then set the event to wake up the
* thread that is waiting for it. * thread that is waiting for it.
*/ */
hw_fib_p = hw_fib_pool;
fib_p = fib_pool;
while (entry != &dev->fib_list) { while (entry != &dev->fib_list) {
/* /*
* Extract the fibctx * Extract the fibctx
...@@ -912,9 +959,11 @@ int aac_command_thread(struct aac_dev * dev) ...@@ -912,9 +959,11 @@ int aac_command_thread(struct aac_dev * dev)
* Warning: no sleep allowed while * Warning: no sleep allowed while
* holding spinlock * holding spinlock
*/ */
hw_newfib = kmalloc(sizeof(struct hw_fib), GFP_ATOMIC); if (hw_fib_p < &hw_fib_pool[num]) {
newfib = kmalloc(sizeof(struct fib), GFP_ATOMIC); hw_newfib = *hw_fib_p;
if (newfib && hw_newfib) { *(hw_fib_p++) = NULL;
newfib = *fib_p;
*(fib_p++) = NULL;
/* /*
* Make the copy of the FIB * Make the copy of the FIB
*/ */
...@@ -929,15 +978,11 @@ int aac_command_thread(struct aac_dev * dev) ...@@ -929,15 +978,11 @@ int aac_command_thread(struct aac_dev * dev)
fibctx->count++; fibctx->count++;
/* /*
* Set the event to wake up the * Set the event to wake up the
* thread that will waiting. * thread that is waiting.
*/ */
up(&fibctx->wait_sem); up(&fibctx->wait_sem);
} else { } else {
printk(KERN_WARNING "aifd: didn't allocate NewFib.\n"); printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
if(newfib)
kfree(newfib);
if(hw_newfib)
kfree(hw_newfib);
} }
entry = entry->next; entry = entry->next;
} }
...@@ -947,21 +992,38 @@ int aac_command_thread(struct aac_dev * dev) ...@@ -947,21 +992,38 @@ int aac_command_thread(struct aac_dev * dev)
*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK); *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
fib_adapter_complete(fib, sizeof(u32)); fib_adapter_complete(fib, sizeof(u32));
spin_unlock_irqrestore(&dev->fib_lock, flagv); spin_unlock_irqrestore(&dev->fib_lock, flagv);
/* Free up the remaining resources */
hw_fib_p = hw_fib_pool;
fib_p = fib_pool;
while (hw_fib_p < &hw_fib_pool[num]) {
if (*hw_fib_p)
kfree(*hw_fib_p);
if (*fib_p)
kfree(*fib_p);
++fib_p;
++hw_fib_p;
}
if (hw_fib_pool)
kfree(hw_fib_pool);
if (fib_pool)
kfree(fib_pool);
} }
spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
kfree(fib); kfree(fib);
spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
} }
/* /*
* There are no more AIF's * There are no more AIF's
*/ */
spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags); spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
schedule(); schedule();
if(signal_pending(current)) if(signal_pending(current))
break; break;
set_current_state(TASK_INTERRUPTIBLE); set_current_state(TASK_INTERRUPTIBLE);
} }
remove_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait); if (dev->queues)
remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
dev->aif_thread = 0; dev->aif_thread = 0;
complete_and_exit(&dev->aif_completion, 0); complete_and_exit(&dev->aif_completion, 0);
return 0;
} }
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment