/*
 *      Adaptec AAC series RAID controller driver
 *      (c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *               2016-2017 Microsemi Corp. (aacraid@microsemi.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  dpcsup.c
 *
 * Abstract: All DPC processing routines for the cyclone board occur here.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>

#include "aacraid.h"
/**
 *      aac_response_normal     -       Handle command replies
 *      @q: Queue to read from
 *
 *      This DPC routine will be run when the adapter interrupts us to let us
 *      know there is a response on our normal priority queue. We will pull
 *      off all the QEs there are and wake up all the waiters before exiting.
 *      We will take a spinlock out on the queue before operating on it.
 */
unsigned int aac_response_normal(struct aac_queue * q)
{
        struct aac_dev * dev = q->dev;
        struct aac_entry *entry;
        struct hw_fib * hwfib;
        struct fib * fib;
        int consumed = 0;
        unsigned long flags, mflags;

        spin_lock_irqsave(q->lock, flags);
        /*
         *      Keep pulling response QEs off the response queue and waking
         *      up the waiters until there are no more QEs. We then return
         *      back to the system. If no response was requested we just
         *      deallocate the Fib here and continue.
         */
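        /*
         * Each response QE packs two facts into entry->addr: bit 0 set
         * marks a "fast response" (the adapter returned no status, so we
         * synthesize ST_OK below), and the value shifted right by two
         * indexes the dev->fibs table.
         */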
        while(aac_consumer_get(dev, q, &entry))
        {
                int fast;
                u32 index = le32_to_cpu(entry->addr);

                fast = index & 0x01;
                fib = &dev->fibs[index >> 2];
                hwfib = fib->hw_fib_va;

                aac_consumer_free(dev, q, HostNormRespQueue);
                /*
                 *      Remove this fib from the Outstanding I/O queue.
                 *      But only if it has not already been timed out.
                 *
                 *      If the fib has been timed out already, then just
                 *      continue. The caller has already been notified that
                 *      the fib timed out.
                 */
                atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending);

                if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
                        spin_unlock_irqrestore(q->lock, flags);
                        aac_fib_complete(fib);
                        aac_fib_free(fib);
                        spin_lock_irqsave(q->lock, flags);
                        continue;
                }
                spin_unlock_irqrestore(q->lock, flags);
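                /*
                 * A fast response carries no status from the adapter, so
                 * mark the FIB as processed and fill in a successful
                 * status word before any waiter inspects it.
                 */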
                if (fast) {
                        /*
                         *      Doctor the fib
                         */
                        *(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
                        hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
                        fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
                }

                FIB_COUNTER_INCREMENT(aac_config.FibRecved);
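                /*
                 * NuFileSystem replies can carry extra flag bits in the
                 * upper half of the status word; treat any such value as
                 * a plain ST_OK rather than surfacing it to the caller.
                 */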
                if (hwfib->header.Command == cpu_to_le16(NuFileSystem))
                {
                        __le32 *pstatus = (__le32 *)hwfib->data;

                        if (*pstatus & cpu_to_le32(0xffff0000))
                                *pstatus = cpu_to_le32(ST_OK);
                }
                if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async))
                {
                        if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected))
                                FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved);
                        else
                                FIB_COUNTER_INCREMENT(aac_config.AsyncRecved);
                        /*
                         *      NOTE:  we cannot touch the fib after this
                         *      call, because it may have been deallocated.
                         */
                        fib->callback(fib->callback_data, fib);
                } else {
                        unsigned long flagv;

                        spin_lock_irqsave(&fib->event_lock, flagv);
                        if (!fib->done) {
                                fib->done = 1;
                                complete(&fib->event_wait);
                        }
                        spin_unlock_irqrestore(&fib->event_lock, flagv);

                        spin_lock_irqsave(&dev->manage_lock, mflags);
                        dev->management_fib_count--;
                        spin_unlock_irqrestore(&dev->manage_lock, mflags);

                        FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
                        if (fib->done == 2) {
                                spin_lock_irqsave(&fib->event_lock, flagv);
                                fib->done = 0;
                                spin_unlock_irqrestore(&fib->event_lock, flagv);
                                aac_fib_complete(fib);
                                aac_fib_free(fib);
                        }
                }
                consumed++;
                spin_lock_irqsave(q->lock, flags);
        }
        if (consumed > aac_config.peak_fibs)
                aac_config.peak_fibs = consumed;
        if (consumed == 0)
                aac_config.zero_fibs++;

        spin_unlock_irqrestore(q->lock, flags);

        return 0;
}
/**
 *      aac_command_normal      -       handle commands
 *      @q: queue to process
 *
 *      This DPC routine will be queued when the adapter interrupts us to
 *      let us know there is a command on our normal priority queue. We will
 *      pull off all the QEs there are and wake up all the waiters before
 *      exiting. We will take a spinlock out on the queue before operating
 *      on it.
 */
unsigned int aac_command_normal(struct aac_queue *q)
{
        struct aac_dev * dev = q->dev;
        struct aac_entry *entry;
        unsigned long flags;

        spin_lock_irqsave(q->lock, flags);
        /*
         *      Keep pulling response QEs off the response queue and waking
         *      up the waiters until there are no more QEs. We then return
         *      back to the system.
         */
        while(aac_consumer_get(dev, q, &entry))
        {
                struct fib fibctx;
                struct hw_fib * hw_fib;
                u32 index;
                struct fib *fib = &fibctx;

                index = le32_to_cpu(entry->addr) / sizeof(struct hw_fib);
                hw_fib = &dev->aif_base_va[index];
                /*
                 *      Allocate a FIB at all costs. For non queued stuff
                 *      we can just use the stack so we are happy. We need
                 *      a fib object in order to manage the linked lists.
                 */
                if (dev->aif_thread)
                        if((fib = kmalloc(sizeof(struct fib), GFP_ATOMIC)) == NULL)
                                fib = &fibctx;

                memset(fib, 0, sizeof(struct fib));
                INIT_LIST_HEAD(&fib->fiblink);
                fib->type = FSAFS_NTC_FIB_CONTEXT;
                fib->size = sizeof(struct fib);
                fib->hw_fib_va = hw_fib;
                fib->data = hw_fib->data;
                fib->dev = dev;
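                /*
                 * Hand the command to the AIF thread when it is running
                 * (the kmalloc'd fib above outlives this function);
                 * otherwise answer the adapter inline with ST_OK using
                 * the on-stack fib.
                 */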
                if (dev->aif_thread && fib != &fibctx) {
                        list_add_tail(&fib->fiblink, &q->cmdq);
                        aac_consumer_free(dev, q, HostNormCmdQueue);
                        wake_up_interruptible(&q->cmdready);
                } else {
                        aac_consumer_free(dev, q, HostNormCmdQueue);
                        spin_unlock_irqrestore(q->lock, flags);
                        /*
                         *      Set the status of this FIB
                         */
                        *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
                        aac_fib_adapter_complete(fib, sizeof(u32));
                        spin_lock_irqsave(q->lock, flags);
                }
        }
        spin_unlock_irqrestore(q->lock, flags);
        return 0;
}
/*
 * aac_aif_callback
 * @context: the context set in the fib - here it is the listener fib itself
 * @fibptr: pointer to the fib
 *
 * Handles the AIFs - new method (SRC)
 */
static void aac_aif_callback(void *context, struct fib * fibptr)
{
        struct fib *fibctx;
        struct aac_dev *dev;
        struct aac_aifcmd *cmd;
        int status;

        fibctx = (struct fib *)context;
        BUG_ON(fibptr == NULL);
        dev = fibptr->dev;

        if ((fibptr->hw_fib_va->header.XferState &
            cpu_to_le32(NoMoreAifDataAvailable)) ||
                dev->sa_firmware) {
                aac_fib_complete(fibptr);
                aac_fib_free(fibptr);
                return;
        }
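        /*
         * Feed the received AIF into the common handling path, then
         * re-arm: reinitialise the listener fib and send a fresh
         * AifReqEvent so one request stays outstanding with the adapter.
         */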
        aac_intr_normal(dev, 0, 1, 0, fibptr->hw_fib_va);
        aac_fib_init(fibctx);
        cmd = (struct aac_aifcmd *) fib_data(fibctx);
        cmd->command = cpu_to_le32(AifReqEvent);

        status = aac_fib_send(AifRequest,
                fibctx,
                sizeof(struct hw_fib)-sizeof(struct aac_fibhdr),
                FsaNormal,
                0, 1,
                (fib_callback)aac_aif_callback, fibctx);
}
/**
 *      aac_intr_normal -       Handle command replies
 *      @dev: Device
 *      @index: completion reference
 *      @isAif: non-zero when this is an adapter initiated fib (AIF)
 *      @isFastResponse: non-zero for a fast response carrying no status
 *      @aif_fib: optional source hw_fib to copy the AIF from
 *
 *      This DPC routine will be run when the adapter interrupts us to let us
 *      know there is a response on our normal priority queue. We will pull
 *      off all the QEs there are and wake up all the waiters before exiting.
 */
unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, int isAif,
        int isFastResponse, struct hw_fib *aif_fib)
{
        unsigned long mflags;

        dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index));
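        /*
         * Three cases: isAif == 1 queues a common AIF to the AIF thread,
         * isAif == 2 answers a new-style (SRC) AIF and re-arms the
         * listener, and anything else completes the response for an
         * outstanding command fib.
         */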
        if (isAif == 1) {       /* AIF - common */
                struct hw_fib * hw_fib;
                struct fib * fib;
                struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue];
                unsigned long flags;

                /*
                 *      Allocate a FIB. For non queued stuff we can just use
                 *      the stack so we are happy. We need a fib object in
                 *      order to manage the linked lists.
                 */
                if ((!dev->aif_thread)
                 || (!(fib = kzalloc(sizeof(struct fib), GFP_ATOMIC))))
                        return 1;
                if (!(hw_fib = kzalloc(sizeof(struct hw_fib), GFP_ATOMIC))) {
                        kfree(fib);
                        return 1;
                }
                if (dev->sa_firmware) {
                        fib->hbacmd_size = index;       /* store event type */
                } else if (aif_fib != NULL) {
                        memcpy(hw_fib, aif_fib, sizeof(struct hw_fib));
                } else {
                        memcpy(hw_fib, (struct hw_fib *)
                                (((uintptr_t)(dev->regs.sa)) + index),
                                sizeof(struct hw_fib));
                }
                INIT_LIST_HEAD(&fib->fiblink);
                fib->type = FSAFS_NTC_FIB_CONTEXT;
                fib->size = sizeof(struct fib);
                fib->hw_fib_va = hw_fib;
                fib->data = hw_fib->data;
                fib->dev = dev;

                spin_lock_irqsave(q->lock, flags);
                list_add_tail(&fib->fiblink, &q->cmdq);
                wake_up_interruptible(&q->cmdready);
                spin_unlock_irqrestore(q->lock, flags);
                return 1;
        } else if (isAif == 2) {        /* AIF - new (SRC) */
                struct fib *fibctx;
                struct aac_aifcmd *cmd;

                fibctx = aac_fib_alloc(dev);
                if (!fibctx)
                        return 1;
                aac_fib_init(fibctx);

                cmd = (struct aac_aifcmd *) fib_data(fibctx);
                cmd->command = cpu_to_le32(AifReqEvent);

                return aac_fib_send(AifRequest,
                        fibctx,
                        sizeof(struct hw_fib)-sizeof(struct aac_fibhdr),
                        FsaNormal,
                        0, 1,
                        (fib_callback)aac_aif_callback, fibctx);
        } else {
                struct fib *fib = &dev->fibs[index];
                int start_callback = 0;
                /*
                 *      Remove this fib from the Outstanding I/O queue.
                 *      But only if it has not already been timed out.
                 *
                 *      If the fib has been timed out already, then just
                 *      continue. The caller has already been notified that
                 *      the fib timed out.
                 */
                atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending);

                if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
                        aac_fib_complete(fib);
                        aac_fib_free(fib);
                        return 0;
                }

                FIB_COUNTER_INCREMENT(aac_config.FibRecved);
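                /*
                 * Completion splits on the fib type: native HBA fibs need
                 * no status fixup, while legacy FIBs below may have a
                 * fast-response status synthesized and NuFileSystem status
                 * words cleaned up before waiters or callbacks run.
                 */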
                if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) {

                        if (isFastResponse)
                                fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;

                        if (fib->callback) {
                                start_callback = 1;
                        } else {
                                unsigned long flagv;
                                int completed = 0;

                                dprintk((KERN_INFO "event_wait up\n"));
                                spin_lock_irqsave(&fib->event_lock, flagv);
                                if (fib->done == 2) {
                                        fib->done = 1;
                                        completed = 1;
                                } else {
                                        fib->done = 1;
                                        complete(&fib->event_wait);
                                }
                                spin_unlock_irqrestore(&fib->event_lock, flagv);

                                spin_lock_irqsave(&dev->manage_lock, mflags);
                                dev->management_fib_count--;
                                spin_unlock_irqrestore(&dev->manage_lock,
                                        mflags);

                                FIB_COUNTER_INCREMENT(aac_config.NativeRecved);
                                if (completed)
                                        aac_fib_complete(fib);
                        }
                } else {
                        struct hw_fib *hwfib = fib->hw_fib_va;

                        if (isFastResponse) {
                                /* Doctor the fib */
                                *(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
                                hwfib->header.XferState |=
                                        cpu_to_le32(AdapterProcessed);
                                fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
                        }

                        if (hwfib->header.Command ==
                                cpu_to_le16(NuFileSystem)) {
                                __le32 *pstatus = (__le32 *)hwfib->data;

                                if (*pstatus & cpu_to_le32(0xffff0000))
                                        *pstatus = cpu_to_le32(ST_OK);
                        }
                        if (hwfib->header.XferState &
                                cpu_to_le32(NoResponseExpected | Async)) {
                                if (hwfib->header.XferState & cpu_to_le32(
                                        NoResponseExpected))
                                        FIB_COUNTER_INCREMENT(
                                                aac_config.NoResponseRecved);
                                else
                                        FIB_COUNTER_INCREMENT(
                                                aac_config.AsyncRecved);
                                start_callback = 1;
                        } else {
                                unsigned long flagv;
                                int completed = 0;

                                dprintk((KERN_INFO "event_wait up\n"));
                                spin_lock_irqsave(&fib->event_lock, flagv);
                                if (fib->done == 2) {
                                        fib->done = 1;
                                        completed = 1;
                                } else {
                                        fib->done = 1;
                                        complete(&fib->event_wait);
                                }
                                spin_unlock_irqrestore(&fib->event_lock, flagv);

                                spin_lock_irqsave(&dev->manage_lock, mflags);
                                dev->management_fib_count--;
                                spin_unlock_irqrestore(&dev->manage_lock,
                                        mflags);

                                FIB_COUNTER_INCREMENT(aac_config.NormalRecved);

                                if (completed)
                                        aac_fib_complete(fib);
                        }
                }
                if (start_callback) {
                        /*
                         * NOTE: we cannot touch the fib after this
                         * call, because it may have been deallocated.
                         */
                        if (likely(fib->callback && fib->callback_data)) {
                                fib->callback(fib->callback_data, fib);
                        } else {
                                aac_fib_complete(fib);
                                aac_fib_free(fib);
                        }
                }
                return 0;
        }
}