1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Driver for sTec s1120 PCIe SSDs. sTec was acquired in 2013 by HGST and HGST
4  * was acquired by Western Digital in 2012.
5  *
6  * Copyright 2012 sTec, Inc.
7  * Copyright (c) 2017 Western Digital Corporation or its affiliates.
8  */
9
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/init.h>
13 #include <linux/pci.h>
14 #include <linux/slab.h>
15 #include <linux/spinlock.h>
16 #include <linux/blkdev.h>
17 #include <linux/blk-mq.h>
18 #include <linux/sched.h>
19 #include <linux/interrupt.h>
20 #include <linux/compiler.h>
21 #include <linux/workqueue.h>
22 #include <linux/delay.h>
23 #include <linux/time.h>
24 #include <linux/hdreg.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/completion.h>
27 #include <linux/scatterlist.h>
28 #include <linux/err.h>
29 #include <linux/aer.h>
30 #include <linux/wait.h>
31 #include <linux/stringify.h>
32 #include <scsi/scsi.h>
33 #include <scsi/sg.h>
34 #include <linux/io.h>
35 #include <linux/uaccess.h>
36 #include <asm/unaligned.h>
37
38 #include "skd_s1120.h"
39
40 static int skd_dbg_level;
41 static int skd_isr_comp_limit = 4;
42
43 #define SKD_ASSERT(expr) \
44         do { \
45                 if (unlikely(!(expr))) { \
46                         pr_err("Assertion failed! %s,%s,%s,line=%d\n",  \
47                                # expr, __FILE__, __func__, __LINE__); \
48                 } \
49         } while (0)
50
51 #define DRV_NAME "skd"
52 #define PFX DRV_NAME ": "
53
54 MODULE_LICENSE("GPL");
55
56 MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver");
57
58 #define PCI_VENDOR_ID_STEC      0x1B39
59 #define PCI_DEVICE_ID_S1120     0x0001
60
61 #define SKD_FUA_NV              (1 << 1)
62 #define SKD_MINORS_PER_DEVICE   16
63
64 #define SKD_MAX_QUEUE_DEPTH     200u
65
66 #define SKD_PAUSE_TIMEOUT       (5 * 1000)
67
68 #define SKD_N_FITMSG_BYTES      (512u)
69 #define SKD_MAX_REQ_PER_MSG     14
70
71 #define SKD_N_SPECIAL_FITMSG_BYTES      (128u)
72
73 /* SG elements are 32 bytes, so we can make this 4096 and still be under
74  * the 128KB limit.  That allows a 4096 * 4K = 16M transfer size.
75  */
76 #define SKD_N_SG_PER_REQ_DEFAULT 256u
77
78 #define SKD_N_COMPLETION_ENTRY  256u
79 #define SKD_N_READ_CAP_BYTES    (8u)
80
81 #define SKD_N_INTERNAL_BYTES    (512u)
82
83 #define SKD_SKCOMP_SIZE                                                 \
84         ((sizeof(struct fit_completion_entry_v1) +                      \
85           sizeof(struct fit_comp_error_info)) * SKD_N_COMPLETION_ENTRY)
86
87 /* 5 bits of uniquifier, 0xF800 */
88 #define SKD_ID_TABLE_MASK       (3u << 8u)
89 #define  SKD_ID_RW_REQUEST      (0u << 8u)
90 #define  SKD_ID_INTERNAL        (1u << 8u)
91 #define  SKD_ID_FIT_MSG         (3u << 8u)
92 #define SKD_ID_SLOT_MASK        0x00FFu
93 #define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu
94
95 #define SKD_N_MAX_SECTORS 2048u
96
97 #define SKD_MAX_RETRIES 2u
98
99 #define SKD_TIMER_SECONDS(seconds) (seconds)
100 #define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))
101
102 #define INQ_STD_NBYTES 36
103
104 enum skd_drvr_state {
105         SKD_DRVR_STATE_LOAD,
106         SKD_DRVR_STATE_IDLE,
107         SKD_DRVR_STATE_BUSY,
108         SKD_DRVR_STATE_STARTING,
109         SKD_DRVR_STATE_ONLINE,
110         SKD_DRVR_STATE_PAUSING,
111         SKD_DRVR_STATE_PAUSED,
112         SKD_DRVR_STATE_RESTARTING,
113         SKD_DRVR_STATE_RESUMING,
114         SKD_DRVR_STATE_STOPPING,
115         SKD_DRVR_STATE_FAULT,
116         SKD_DRVR_STATE_DISAPPEARED,
117         SKD_DRVR_STATE_PROTOCOL_MISMATCH,
118         SKD_DRVR_STATE_BUSY_ERASE,
119         SKD_DRVR_STATE_BUSY_SANITIZE,
120         SKD_DRVR_STATE_BUSY_IMMINENT,
121         SKD_DRVR_STATE_WAIT_BOOT,
122         SKD_DRVR_STATE_SYNCING,
123 };
124
125 #define SKD_WAIT_BOOT_TIMO      SKD_TIMER_SECONDS(90u)
126 #define SKD_STARTING_TIMO       SKD_TIMER_SECONDS(8u)
127 #define SKD_RESTARTING_TIMO     SKD_TIMER_MINUTES(4u)
128 #define SKD_BUSY_TIMO           SKD_TIMER_MINUTES(20u)
129 #define SKD_STARTED_BUSY_TIMO   SKD_TIMER_SECONDS(60u)
130 #define SKD_START_WAIT_SECONDS  90u
131
132 enum skd_req_state {
133         SKD_REQ_STATE_IDLE,
134         SKD_REQ_STATE_SETUP,
135         SKD_REQ_STATE_BUSY,
136         SKD_REQ_STATE_COMPLETED,
137         SKD_REQ_STATE_TIMEOUT,
138 };
139
140 enum skd_check_status_action {
141         SKD_CHECK_STATUS_REPORT_GOOD,
142         SKD_CHECK_STATUS_REPORT_SMART_ALERT,
143         SKD_CHECK_STATUS_REQUEUE_REQUEST,
144         SKD_CHECK_STATUS_REPORT_ERROR,
145         SKD_CHECK_STATUS_BUSY_IMMINENT,
146 };
147
148 struct skd_msg_buf {
149         struct fit_msg_hdr      fmh;
150         struct skd_scsi_request scsi[SKD_MAX_REQ_PER_MSG];
151 };
152
153 struct skd_fitmsg_context {
154         u32 id;
155
156         u32 length;
157
158         struct skd_msg_buf *msg_buf;
159         dma_addr_t mb_dma_address;
160 };
161
162 struct skd_request_context {
163         enum skd_req_state state;
164
165         u16 id;
166         u32 fitmsg_id;
167
168         u8 flush_cmd;
169
170         enum dma_data_direction data_dir;
171         struct scatterlist *sg;
172         u32 n_sg;
173         u32 sg_byte_count;
174
175         struct fit_sg_descriptor *sksg_list;
176         dma_addr_t sksg_dma_address;
177
178         struct fit_completion_entry_v1 completion;
179
180         struct fit_comp_error_info err_info;
181         int retries;
182
183         blk_status_t status;
184 };
185
186 struct skd_special_context {
187         struct skd_request_context req;
188
189         void *data_buf;
190         dma_addr_t db_dma_address;
191
192         struct skd_msg_buf *msg_buf;
193         dma_addr_t mb_dma_address;
194 };
195
196 typedef enum skd_irq_type {
197         SKD_IRQ_LEGACY,
198         SKD_IRQ_MSI,
199         SKD_IRQ_MSIX
200 } skd_irq_type_t;
201
202 #define SKD_MAX_BARS                    2
203
204 struct skd_device {
205         void __iomem *mem_map[SKD_MAX_BARS];
206         resource_size_t mem_phys[SKD_MAX_BARS];
207         u32 mem_size[SKD_MAX_BARS];
208
209         struct skd_msix_entry *msix_entries;
210
211         struct pci_dev *pdev;
212         int pcie_error_reporting_is_enabled;
213
214         spinlock_t lock;
215         struct gendisk *disk;
216         struct blk_mq_tag_set tag_set;
217         struct request_queue *queue;
218         struct skd_fitmsg_context *skmsg;
219         struct device *class_dev;
220         int gendisk_on;
221         int sync_done;
222
223         u32 devno;
224         u32 major;
225         char isr_name[30];
226
227         enum skd_drvr_state state;
228         u32 drive_state;
229
230         u32 cur_max_queue_depth;
231         u32 queue_low_water_mark;
232         u32 dev_max_queue_depth;
233
234         u32 num_fitmsg_context;
235         u32 num_req_context;
236
237         struct skd_fitmsg_context *skmsg_table;
238
239         struct skd_special_context internal_skspcl;
240         u32 read_cap_blocksize;
241         u32 read_cap_last_lba;
242         int read_cap_is_valid;
243         int inquiry_is_valid;
244         u8 inq_serial_num[13];  /* 12 chars plus null terminator */
245
246         u8 skcomp_cycle;
247         u32 skcomp_ix;
248         struct kmem_cache *msgbuf_cache;
249         struct kmem_cache *sglist_cache;
250         struct kmem_cache *databuf_cache;
251         struct fit_completion_entry_v1 *skcomp_table;
252         struct fit_comp_error_info *skerr_table;
253         dma_addr_t cq_dma_address;
254
255         wait_queue_head_t waitq;
256
257         struct timer_list timer;
258         u32 timer_countdown;
259         u32 timer_substate;
260
261         int sgs_per_request;
262         u32 last_mtd;
263
264         u32 proto_ver;
265
266         int dbg_level;
267         u32 connect_time_stamp;
268         int connect_retries;
269 #define SKD_MAX_CONNECT_RETRIES 16
270         u32 drive_jiffies;
271
272         u32 timo_slot;
273
274         struct work_struct start_queue;
275         struct work_struct completion_worker;
276 };
277
278 #define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF)
279 #define SKD_READL(DEV, OFF)      skd_reg_read32(DEV, OFF)
280 #define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF)
281
282 static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
283 {
284         u32 val = readl(skdev->mem_map[1] + offset);
285
286         if (unlikely(skdev->dbg_level >= 2))
287                 dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
288         return val;
289 }
290
291 static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
292                                    u32 offset)
293 {
294         writel(val, skdev->mem_map[1] + offset);
295         if (unlikely(skdev->dbg_level >= 2))
296                 dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
297 }
298
299 static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
300                                    u32 offset)
301 {
302         writeq(val, skdev->mem_map[1] + offset);
303         if (unlikely(skdev->dbg_level >= 2))
304                 dev_dbg(&skdev->pdev->dev, "offset %x = %016llx\n", offset,
305                         val);
306 }
307
308
309 #define SKD_IRQ_DEFAULT SKD_IRQ_MSIX
310 static int skd_isr_type = SKD_IRQ_DEFAULT;
311
312 module_param(skd_isr_type, int, 0444);
313 MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability."
314                  " (0==legacy, 1==MSI, 2==MSI-X, default==2)");
315
316 #define SKD_MAX_REQ_PER_MSG_DEFAULT 1
317 static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
318
319 module_param(skd_max_req_per_msg, int, 0444);
320 MODULE_PARM_DESC(skd_max_req_per_msg,
321                  "Maximum SCSI requests packed in a single message."
322                  " (1-" __stringify(SKD_MAX_REQ_PER_MSG) ", default==1)");
323
324 #define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
325 #define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
326 static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
327
328 module_param(skd_max_queue_depth, int, 0444);
329 MODULE_PARM_DESC(skd_max_queue_depth,
330                  "Maximum SCSI requests issued to s1120."
331                  " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")");
332
333 static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
334 module_param(skd_sgs_per_request, int, 0444);
335 MODULE_PARM_DESC(skd_sgs_per_request,
336                  "Maximum SG elements per block request."
337                  " (1-4096, default==256)");
338
339 static int skd_max_pass_thru = 1;
340 module_param(skd_max_pass_thru, int, 0444);
341 MODULE_PARM_DESC(skd_max_pass_thru,
342                  "Maximum SCSI pass-thru at a time. IGNORED");
343
344 module_param(skd_dbg_level, int, 0444);
345 MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");
346
347 module_param(skd_isr_comp_limit, int, 0444);
348 MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");
349
350 /* Major device number dynamically assigned. */
351 static u32 skd_major;
352
353 static void skd_destruct(struct skd_device *skdev);
354 static const struct block_device_operations skd_blockdev_ops;
355 static void skd_send_fitmsg(struct skd_device *skdev,
356                             struct skd_fitmsg_context *skmsg);
357 static void skd_send_special_fitmsg(struct skd_device *skdev,
358                                     struct skd_special_context *skspcl);
359 static bool skd_preop_sg_list(struct skd_device *skdev,
360                              struct skd_request_context *skreq);
361 static void skd_postop_sg_list(struct skd_device *skdev,
362                                struct skd_request_context *skreq);
363
364 static void skd_restart_device(struct skd_device *skdev);
365 static int skd_quiesce_dev(struct skd_device *skdev);
366 static int skd_unquiesce_dev(struct skd_device *skdev);
367 static void skd_disable_interrupts(struct skd_device *skdev);
368 static void skd_isr_fwstate(struct skd_device *skdev);
369 static void skd_recover_requests(struct skd_device *skdev);
370 static void skd_soft_reset(struct skd_device *skdev);
371
372 const char *skd_drive_state_to_str(int state);
373 const char *skd_skdev_state_to_str(enum skd_drvr_state state);
374 static void skd_log_skdev(struct skd_device *skdev, const char *event);
375 static void skd_log_skreq(struct skd_device *skdev,
376                           struct skd_request_context *skreq, const char *event);
377
378 /*
379  *****************************************************************************
380  * READ/WRITE REQUESTS
381  *****************************************************************************
382  */
383 static bool skd_inc_in_flight(struct request *rq, void *data, bool reserved)
384 {
385         int *count = data;
386
387         (*count)++;
388         return true;
389 }
390
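/* Count in-flight requests by iterating over all busy tags in the tag set. */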
391 static int skd_in_flight(struct skd_device *skdev)
392 {
393         int count = 0;
394
395         blk_mq_tagset_busy_iter(&skdev->tag_set, skd_inc_in_flight, &count);
396
397         return count;
398 }
399
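/*
 * Build a READ(10)/WRITE(10) CDB: byte 0 is the opcode, bytes 2-5 carry the
 * 32-bit LBA and bytes 7-8 the 16-bit sector count, both big-endian.
 */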
400 static void
401 skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
402                 int data_dir, unsigned lba,
403                 unsigned count)
404 {
405         if (data_dir == READ)
406                 scsi_req->cdb[0] = READ_10;
407         else
408                 scsi_req->cdb[0] = WRITE_10;
409
410         scsi_req->cdb[1] = 0;
411         scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
412         scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
413         scsi_req->cdb[4] = (lba & 0xff00) >> 8;
414         scsi_req->cdb[5] = (lba & 0xff);
415         scsi_req->cdb[6] = 0;
416         scsi_req->cdb[7] = (count & 0xff00) >> 8;
417         scsi_req->cdb[8] = count & 0xff;
418         scsi_req->cdb[9] = 0;
419 }
420
421 static void
422 skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
423                             struct skd_request_context *skreq)
424 {
425         skreq->flush_cmd = 1;
426
427         scsi_req->cdb[0] = SYNCHRONIZE_CACHE;
428         scsi_req->cdb[1] = 0;
429         scsi_req->cdb[2] = 0;
430         scsi_req->cdb[3] = 0;
431         scsi_req->cdb[4] = 0;
432         scsi_req->cdb[5] = 0;
433         scsi_req->cdb[6] = 0;
434         scsi_req->cdb[7] = 0;
435         scsi_req->cdb[8] = 0;
436         scsi_req->cdb[9] = 0;
437 }
438
439 /*
440  * Return true if and only if all pending requests should be failed.
441  */
442 static bool skd_fail_all(struct request_queue *q)
443 {
444         struct skd_device *skdev = q->queuedata;
445
446         SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);
447
448         skd_log_skdev(skdev, "req_not_online");
449         switch (skdev->state) {
450         case SKD_DRVR_STATE_PAUSING:
451         case SKD_DRVR_STATE_PAUSED:
452         case SKD_DRVR_STATE_STARTING:
453         case SKD_DRVR_STATE_RESTARTING:
454         case SKD_DRVR_STATE_WAIT_BOOT:
455         /* In case of starting, we haven't started the queue,
456          * so we can't get here... but requests are
457          * possibly hanging out waiting for us because we
458          * reported /dev/skd0 already.  They'll wait
459          * forever if connect doesn't complete.
460          * What to do: delay /dev/skd0?
461          */
462         case SKD_DRVR_STATE_BUSY:
463         case SKD_DRVR_STATE_BUSY_IMMINENT:
464         case SKD_DRVR_STATE_BUSY_ERASE:
465                 return false;
466
467         case SKD_DRVR_STATE_BUSY_SANITIZE:
468         case SKD_DRVR_STATE_STOPPING:
469         case SKD_DRVR_STATE_SYNCING:
470         case SKD_DRVR_STATE_FAULT:
471         case SKD_DRVR_STATE_DISAPPEARED:
472         default:
473                 return true;
474         }
475 }
476
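/*
 * blk-mq .queue_rq handler: translate a block request into a SCSI request
 * inside a FIT message. When skd_max_req_per_msg > 1, requests are
 * coalesced into the current message under skdev->lock and the message is
 * sent only once it is full or mqd->last marks the end of a batch.
 */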
477 static blk_status_t skd_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
478                                     const struct blk_mq_queue_data *mqd)
479 {
480         struct request *const req = mqd->rq;
481         struct request_queue *const q = req->q;
482         struct skd_device *skdev = q->queuedata;
483         struct skd_fitmsg_context *skmsg;
484         struct fit_msg_hdr *fmh;
485         const u32 tag = blk_mq_unique_tag(req);
486         struct skd_request_context *const skreq = blk_mq_rq_to_pdu(req);
487         struct skd_scsi_request *scsi_req;
488         unsigned long flags = 0;
489         const u32 lba = blk_rq_pos(req);
490         const u32 count = blk_rq_sectors(req);
491         const int data_dir = rq_data_dir(req);
492
493         if (unlikely(skdev->state != SKD_DRVR_STATE_ONLINE))
494                 return skd_fail_all(q) ? BLK_STS_IOERR : BLK_STS_RESOURCE;
495
496         if (!(req->rq_flags & RQF_DONTPREP)) {
497                 skreq->retries = 0;
498                 req->rq_flags |= RQF_DONTPREP;
499         }
500
501         blk_mq_start_request(req);
502
503         WARN_ONCE(tag >= skd_max_queue_depth, "%#x >= %#x (nr_requests = %lu)\n",
504                   tag, skd_max_queue_depth, q->nr_requests);
505
506         SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
507
508         dev_dbg(&skdev->pdev->dev,
509                 "new req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req, lba,
510                 lba, count, count, data_dir);
511
512         skreq->id = tag + SKD_ID_RW_REQUEST;
513         skreq->flush_cmd = 0;
514         skreq->n_sg = 0;
515         skreq->sg_byte_count = 0;
516
517         skreq->fitmsg_id = 0;
518
519         skreq->data_dir = data_dir == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
520
521         if (req->bio && !skd_preop_sg_list(skdev, skreq)) {
522                 dev_dbg(&skdev->pdev->dev, "error Out\n");
523                 skreq->status = BLK_STS_RESOURCE;
524                 blk_mq_complete_request(req);
525                 return BLK_STS_OK;
526         }
527
528         dma_sync_single_for_device(&skdev->pdev->dev, skreq->sksg_dma_address,
529                                    skreq->n_sg *
530                                    sizeof(struct fit_sg_descriptor),
531                                    DMA_TO_DEVICE);
532
533         /* Either a FIT msg is in progress or we have to start one. */
534         if (skd_max_req_per_msg == 1) {
535                 skmsg = NULL;
536         } else {
537                 spin_lock_irqsave(&skdev->lock, flags);
538                 skmsg = skdev->skmsg;
539         }
540         if (!skmsg) {
541                 skmsg = &skdev->skmsg_table[tag];
542                 skdev->skmsg = skmsg;
543
544                 /* Initialize the FIT msg header */
545                 fmh = &skmsg->msg_buf->fmh;
546                 memset(fmh, 0, sizeof(*fmh));
547                 fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
548                 skmsg->length = sizeof(*fmh);
549         } else {
550                 fmh = &skmsg->msg_buf->fmh;
551         }
552
553         skreq->fitmsg_id = skmsg->id;
554
555         scsi_req = &skmsg->msg_buf->scsi[fmh->num_protocol_cmds_coalesced];
556         memset(scsi_req, 0, sizeof(*scsi_req));
557
558         scsi_req->hdr.tag = skreq->id;
559         scsi_req->hdr.sg_list_dma_address =
560                 cpu_to_be64(skreq->sksg_dma_address);
561
562         if (req_op(req) == REQ_OP_FLUSH) {
563                 skd_prep_zerosize_flush_cdb(scsi_req, skreq);
564                 SKD_ASSERT(skreq->flush_cmd == 1);
565         } else {
566                 skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
567         }
568
569         if (req->cmd_flags & REQ_FUA)
570                 scsi_req->cdb[1] |= SKD_FUA_NV;
571
572         scsi_req->hdr.sg_list_len_bytes = cpu_to_be32(skreq->sg_byte_count);
573
574         /* Complete resource allocations. */
575         skreq->state = SKD_REQ_STATE_BUSY;
576
577         skmsg->length += sizeof(struct skd_scsi_request);
578         fmh->num_protocol_cmds_coalesced++;
579
580         dev_dbg(&skdev->pdev->dev, "req=0x%x busy=%d\n", skreq->id,
581                 skd_in_flight(skdev));
582
583         /*
584          * If the FIT msg buffer is full, send it.
585          */
586         if (skd_max_req_per_msg == 1) {
587                 skd_send_fitmsg(skdev, skmsg);
588         } else {
589                 if (mqd->last ||
590                     fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
591                         skd_send_fitmsg(skdev, skmsg);
592                         skdev->skmsg = NULL;
593                 }
594                 spin_unlock_irqrestore(&skdev->lock, flags);
595         }
596
597         return BLK_STS_OK;
598 }
599
600 static enum blk_eh_timer_return skd_timed_out(struct request *req,
601                                               bool reserved)
602 {
603         struct skd_device *skdev = req->q->queuedata;
604
605         dev_err(&skdev->pdev->dev, "request with tag %#x timed out\n",
606                 blk_mq_unique_tag(req));
607
608         return BLK_EH_RESET_TIMER;
609 }
610
611 static void skd_complete_rq(struct request *req)
612 {
613         struct skd_request_context *skreq = blk_mq_rq_to_pdu(req);
614
615         blk_mq_end_request(req, skreq->status);
616 }
617
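/*
 * Map the request's bio data into the preallocated scatterlist, DMA-map it,
 * and mirror the result into the FIT SG descriptor list that is handed to
 * the device. Returns false if mapping fails.
 */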
618 static bool skd_preop_sg_list(struct skd_device *skdev,
619                              struct skd_request_context *skreq)
620 {
621         struct request *req = blk_mq_rq_from_pdu(skreq);
622         struct scatterlist *sgl = &skreq->sg[0], *sg;
623         int n_sg;
624         int i;
625
626         skreq->sg_byte_count = 0;
627
628         WARN_ON_ONCE(skreq->data_dir != DMA_TO_DEVICE &&
629                      skreq->data_dir != DMA_FROM_DEVICE);
630
631         n_sg = blk_rq_map_sg(skdev->queue, req, sgl);
632         if (n_sg <= 0)
633                 return false;
634
635         /*
636          * Map scatterlist to PCI bus addresses.
637          * Note PCI might change the number of entries.
638          */
639         n_sg = dma_map_sg(&skdev->pdev->dev, sgl, n_sg, skreq->data_dir);
640         if (n_sg <= 0)
641                 return false;
642
643         SKD_ASSERT(n_sg <= skdev->sgs_per_request);
644
645         skreq->n_sg = n_sg;
646
647         for_each_sg(sgl, sg, n_sg, i) {
648                 struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
649                 u32 cnt = sg_dma_len(sg);
650                 uint64_t dma_addr = sg_dma_address(sg);
651
652                 sgd->control = FIT_SGD_CONTROL_NOT_LAST;
653                 sgd->byte_count = cnt;
654                 skreq->sg_byte_count += cnt;
655                 sgd->host_side_addr = dma_addr;
656                 sgd->dev_side_addr = 0;
657         }
658
659         skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
660         skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;
661
662         if (unlikely(skdev->dbg_level > 1)) {
663                 dev_dbg(&skdev->pdev->dev,
664                         "skreq=%x sksg_list=%p sksg_dma=%pad\n",
665                         skreq->id, skreq->sksg_list, &skreq->sksg_dma_address);
666                 for (i = 0; i < n_sg; i++) {
667                         struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
668
669                         dev_dbg(&skdev->pdev->dev,
670                                 "  sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
671                                 i, sgd->byte_count, sgd->control,
672                                 sgd->host_side_addr, sgd->next_desc_ptr);
673                 }
674         }
675
676         return true;
677 }
678
679 static void skd_postop_sg_list(struct skd_device *skdev,
680                                struct skd_request_context *skreq)
681 {
682         /*
683          * restore the next ptr for the next IO request so we
684          * don't have to set it every time.
685          */
686         skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
687                 skreq->sksg_dma_address +
688                 ((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
689         dma_unmap_sg(&skdev->pdev->dev, &skreq->sg[0], skreq->n_sg,
690                      skreq->data_dir);
691 }
692
693 /*
694  *****************************************************************************
695  * TIMER
696  *****************************************************************************
697  */
698
699 static void skd_timer_tick_not_online(struct skd_device *skdev);
700
701 static void skd_start_queue(struct work_struct *work)
702 {
703         struct skd_device *skdev = container_of(work, typeof(*skdev),
704                                                 start_queue);
705
706         /*
707          * Although it is safe to call blk_start_queue() from interrupt
708          * context, blk_mq_start_hw_queues() must not be called from
709          * interrupt context.
710          */
711         blk_mq_start_hw_queues(skdev->queue);
712 }
713
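/*
 * One-second housekeeping timer: sample FIT_STATUS, feed firmware state
 * changes to skd_isr_fwstate(), and run the not-online countdown logic.
 */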
714 static void skd_timer_tick(struct timer_list *t)
715 {
716         struct skd_device *skdev = from_timer(skdev, t, timer);
717         unsigned long reqflags;
718         u32 state;
719
720         if (skdev->state == SKD_DRVR_STATE_FAULT)
721                 /* The driver has declared fault, and we want it to
722                  * stay that way until driver is reloaded.
723                  */
724                 return;
725
726         spin_lock_irqsave(&skdev->lock, reqflags);
727
728         state = SKD_READL(skdev, FIT_STATUS);
729         state &= FIT_SR_DRIVE_STATE_MASK;
730         if (state != skdev->drive_state)
731                 skd_isr_fwstate(skdev);
732
733         if (skdev->state != SKD_DRVR_STATE_ONLINE)
734                 skd_timer_tick_not_online(skdev);
735
736         mod_timer(&skdev->timer, (jiffies + HZ));
737
738         spin_unlock_irqrestore(&skdev->lock, reqflags);
739 }
740
741 static void skd_timer_tick_not_online(struct skd_device *skdev)
742 {
743         switch (skdev->state) {
744         case SKD_DRVR_STATE_IDLE:
745         case SKD_DRVR_STATE_LOAD:
746                 break;
747         case SKD_DRVR_STATE_BUSY_SANITIZE:
748                 dev_dbg(&skdev->pdev->dev,
749                         "drive busy sanitize[%x], driver[%x]\n",
750                         skdev->drive_state, skdev->state);
751                 /* If we've been in sanitize for 3 seconds, we figure we're not
752          * going to get any more completions, so recover requests now
753                  */
754                 if (skdev->timer_countdown > 0) {
755                         skdev->timer_countdown--;
756                         return;
757                 }
758                 skd_recover_requests(skdev);
759                 break;
760
761         case SKD_DRVR_STATE_BUSY:
762         case SKD_DRVR_STATE_BUSY_IMMINENT:
763         case SKD_DRVR_STATE_BUSY_ERASE:
764                 dev_dbg(&skdev->pdev->dev, "busy[%x], countdown=%d\n",
765                         skdev->state, skdev->timer_countdown);
766                 if (skdev->timer_countdown > 0) {
767                         skdev->timer_countdown--;
768                         return;
769                 }
770                 dev_dbg(&skdev->pdev->dev,
771                         "busy[%x], timedout=%d, restarting device.\n",
772                         skdev->state, skdev->timer_countdown);
773                 skd_restart_device(skdev);
774                 break;
775
776         case SKD_DRVR_STATE_WAIT_BOOT:
777         case SKD_DRVR_STATE_STARTING:
778                 if (skdev->timer_countdown > 0) {
779                         skdev->timer_countdown--;
780                         return;
781                 }
782                 /* For now, we fault the drive.  Could attempt resets to
783                  * recover at some point. */
784                 skdev->state = SKD_DRVR_STATE_FAULT;
785
786                 dev_err(&skdev->pdev->dev, "DriveFault Connect Timeout (%x)\n",
787                         skdev->drive_state);
788
789                 /* start the queue so we can respond with errors to requests */
790                 /* wake up anyone waiting for startup to complete */
791                 schedule_work(&skdev->start_queue);
792                 skdev->gendisk_on = -1;
793                 wake_up_interruptible(&skdev->waitq);
794                 break;
795
796         case SKD_DRVR_STATE_ONLINE:
797                 /* shouldn't get here. */
798                 break;
799
800         case SKD_DRVR_STATE_PAUSING:
801         case SKD_DRVR_STATE_PAUSED:
802                 break;
803
804         case SKD_DRVR_STATE_RESTARTING:
805                 if (skdev->timer_countdown > 0) {
806                         skdev->timer_countdown--;
807                         return;
808                 }
809                 /* For now, we fault the drive. Could attempt resets to
810                  * recover at some point. */
811                 skdev->state = SKD_DRVR_STATE_FAULT;
812                 dev_err(&skdev->pdev->dev,
813                         "DriveFault Reconnect Timeout (%x)\n",
814                         skdev->drive_state);
815
816                 /*
817                  * Recovering does two things:
818                  * 1. completes IO with error
819                  * 2. reclaims dma resources
820                  * When is it safe to recover requests?
821                  * - if the drive state is faulted
822          * - if the state is still soft reset after our timeout
823                  * - if the drive registers are dead (state = FF)
824                  * If it is "unsafe", we still need to recover, so we will
825                  * disable pci bus mastering and disable our interrupts.
826                  */
827
828                 if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
829                     (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
830                     (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK))
831                         /* It never came out of soft reset. Try to
832                          * recover the requests and then let them
833                          * fail. This is to mitigate hung processes. */
834                         skd_recover_requests(skdev);
835                 else {
836                         dev_err(&skdev->pdev->dev, "Disable BusMaster (%x)\n",
837                                 skdev->drive_state);
838                         pci_disable_device(skdev->pdev);
839                         skd_disable_interrupts(skdev);
840                         skd_recover_requests(skdev);
841                 }
842
843                 /* start the queue so we can respond with errors to requests */
844                 /* wake up anyone waiting for startup to complete */
845                 schedule_work(&skdev->start_queue);
846                 skdev->gendisk_on = -1;
847                 wake_up_interruptible(&skdev->waitq);
848                 break;
849
850         case SKD_DRVR_STATE_RESUMING:
851         case SKD_DRVR_STATE_STOPPING:
852         case SKD_DRVR_STATE_SYNCING:
853         case SKD_DRVR_STATE_FAULT:
854         case SKD_DRVR_STATE_DISAPPEARED:
855         default:
856                 break;
857         }
858 }
859
860 static int skd_start_timer(struct skd_device *skdev)
861 {
862         int rc;
863
864         timer_setup(&skdev->timer, skd_timer_tick, 0);
865
866         rc = mod_timer(&skdev->timer, (jiffies + HZ));
867         if (rc)
868                 dev_err(&skdev->pdev->dev, "failed to start timer %d\n", rc);
869         return rc;
870 }
871
872 static void skd_kill_timer(struct skd_device *skdev)
873 {
874         del_timer_sync(&skdev->timer);
875 }
876
877 /*
878  *****************************************************************************
879  * INTERNAL REQUESTS -- generated by driver itself
880  *****************************************************************************
881  */
882
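/*
 * One-time setup of the internal special request: a FIT header with a single
 * coalesced SCSI command and one SG descriptor pointing at data_buf.
 */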
883 static int skd_format_internal_skspcl(struct skd_device *skdev)
884 {
885         struct skd_special_context *skspcl = &skdev->internal_skspcl;
886         struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
887         struct fit_msg_hdr *fmh;
888         uint64_t dma_address;
889         struct skd_scsi_request *scsi;
890
891         fmh = &skspcl->msg_buf->fmh;
892         fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
893         fmh->num_protocol_cmds_coalesced = 1;
894
895         scsi = &skspcl->msg_buf->scsi[0];
896         memset(scsi, 0, sizeof(*scsi));
897         dma_address = skspcl->req.sksg_dma_address;
898         scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
899         skspcl->req.n_sg = 1;
900         sgd->control = FIT_SGD_CONTROL_LAST;
901         sgd->byte_count = 0;
902         sgd->host_side_addr = skspcl->db_dma_address;
903         sgd->dev_side_addr = 0;
904         sgd->next_desc_ptr = 0LL;
905
906         return 1;
907 }
908
909 #define WR_BUF_SIZE SKD_N_INTERNAL_BYTES
910
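/*
 * Issue one internally generated SCSI command (TEST UNIT READY, READ
 * CAPACITY, INQUIRY serial-number page, SYNCHRONIZE CACHE, or WRITE/READ
 * BUFFER). Only one internal command is outstanding at a time; if the
 * request is not idle, the refresh already in flight is left alone.
 */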
911 static void skd_send_internal_skspcl(struct skd_device *skdev,
912                                      struct skd_special_context *skspcl,
913                                      u8 opcode)
914 {
915         struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
916         struct skd_scsi_request *scsi;
917         unsigned char *buf = skspcl->data_buf;
918         int i;
919
920         if (skspcl->req.state != SKD_REQ_STATE_IDLE)
921                 /*
922                  * A refresh is already in progress.
923                  * Just wait for it to finish.
924                  */
925                 return;
926
927         skspcl->req.state = SKD_REQ_STATE_BUSY;
928
929         scsi = &skspcl->msg_buf->scsi[0];
930         scsi->hdr.tag = skspcl->req.id;
931
932         memset(scsi->cdb, 0, sizeof(scsi->cdb));
933
934         switch (opcode) {
935         case TEST_UNIT_READY:
936                 scsi->cdb[0] = TEST_UNIT_READY;
937                 sgd->byte_count = 0;
938                 scsi->hdr.sg_list_len_bytes = 0;
939                 break;
940
941         case READ_CAPACITY:
942                 scsi->cdb[0] = READ_CAPACITY;
943                 sgd->byte_count = SKD_N_READ_CAP_BYTES;
944                 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
945                 break;
946
947         case INQUIRY:
948                 scsi->cdb[0] = INQUIRY;
949                 scsi->cdb[1] = 0x01;    /* evpd */
950                 scsi->cdb[2] = 0x80;    /* serial number page */
951                 scsi->cdb[4] = 0x10;
952                 sgd->byte_count = 16;
953                 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
954                 break;
955
956         case SYNCHRONIZE_CACHE:
957                 scsi->cdb[0] = SYNCHRONIZE_CACHE;
958                 sgd->byte_count = 0;
959                 scsi->hdr.sg_list_len_bytes = 0;
960                 break;
961
962         case WRITE_BUFFER:
963                 scsi->cdb[0] = WRITE_BUFFER;
964                 scsi->cdb[1] = 0x02;
965                 scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
966                 scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
967                 sgd->byte_count = WR_BUF_SIZE;
968                 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
969                 /* fill incrementing byte pattern */
970                 for (i = 0; i < sgd->byte_count; i++)
971                         buf[i] = i & 0xFF;
972                 break;
973
974         case READ_BUFFER:
975                 scsi->cdb[0] = READ_BUFFER;
976                 scsi->cdb[1] = 0x02;
977                 scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
978                 scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
979                 sgd->byte_count = WR_BUF_SIZE;
980                 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
981                 memset(skspcl->data_buf, 0, sgd->byte_count);
982                 break;
983
984         default:
985                 SKD_ASSERT(!"Don't know what to send");
986                 return;
987
988         }
989         skd_send_special_fitmsg(skdev, skspcl);
990 }
991
992 static void skd_refresh_device_data(struct skd_device *skdev)
993 {
994         struct skd_special_context *skspcl = &skdev->internal_skspcl;
995
996         skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
997 }
998
999 static int skd_chk_read_buf(struct skd_device *skdev,
1000                             struct skd_special_context *skspcl)
1001 {
1002         unsigned char *buf = skspcl->data_buf;
1003         int i;
1004
1005         /* check for incrementing byte pattern */
1006         for (i = 0; i < WR_BUF_SIZE; i++)
1007                 if (buf[i] != (i & 0xFF))
1008                         return 1;
1009
1010         return 0;
1011 }
1012
1013 static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
1014                                  u8 code, u8 qual, u8 fruc)
1015 {
1016         /* If the check condition is of special interest, log a message */
1017         if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02)
1018             && (code == 0x04) && (qual == 0x06)) {
1019                 dev_err(&skdev->pdev->dev,
1020                         "*** LOST_WRITE_DATA ERROR *** key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
1021                         key, code, qual, fruc);
1022         }
1023 }
1024
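/*
 * Completion handler for internal commands. Drives the connect sequence:
 * TEST UNIT READY -> WRITE BUFFER -> READ BUFFER (pattern check) ->
 * READ CAPACITY -> INQUIRY, after which the device is brought online.
 */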
1025 static void skd_complete_internal(struct skd_device *skdev,
1026                                   struct fit_completion_entry_v1 *skcomp,
1027                                   struct fit_comp_error_info *skerr,
1028                                   struct skd_special_context *skspcl)
1029 {
1030         u8 *buf = skspcl->data_buf;
1031         u8 status;
1032         int i;
1033         struct skd_scsi_request *scsi = &skspcl->msg_buf->scsi[0];
1034
1035         lockdep_assert_held(&skdev->lock);
1036
1037         SKD_ASSERT(skspcl == &skdev->internal_skspcl);
1038
1039         dev_dbg(&skdev->pdev->dev, "complete internal %x\n", scsi->cdb[0]);
1040
1041         dma_sync_single_for_cpu(&skdev->pdev->dev,
1042                                 skspcl->db_dma_address,
1043                                 skspcl->req.sksg_list[0].byte_count,
1044                                 DMA_BIDIRECTIONAL);
1045
1046         skspcl->req.completion = *skcomp;
1047         skspcl->req.state = SKD_REQ_STATE_IDLE;
1048
1049         status = skspcl->req.completion.status;
1050
1051         skd_log_check_status(skdev, status, skerr->key, skerr->code,
1052                              skerr->qual, skerr->fruc);
1053
1054         switch (scsi->cdb[0]) {
1055         case TEST_UNIT_READY:
1056                 if (status == SAM_STAT_GOOD)
1057                         skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
1058                 else if ((status == SAM_STAT_CHECK_CONDITION) &&
1059                          (skerr->key == MEDIUM_ERROR))
1060                         skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
1061                 else {
1062                         if (skdev->state == SKD_DRVR_STATE_STOPPING) {
1063                                 dev_dbg(&skdev->pdev->dev,
1064                                         "TUR failed, not sending any more; state 0x%x\n",
1065                                         skdev->state);
1066                                 return;
1067                         }
1068                         dev_dbg(&skdev->pdev->dev,
1069                                 "**** TUR failed, retry skerr\n");
1070                         skd_send_internal_skspcl(skdev, skspcl,
1071                                                  TEST_UNIT_READY);
1072                 }
1073                 break;
1074
1075         case WRITE_BUFFER:
1076                 if (status == SAM_STAT_GOOD)
1077                         skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
1078                 else {
1079                         if (skdev->state == SKD_DRVR_STATE_STOPPING) {
1080                                 dev_dbg(&skdev->pdev->dev,
1081                                         "write buffer failed, not sending any more; state 0x%x\n",
1082                                         skdev->state);
1083                                 return;
1084                         }
1085                         dev_dbg(&skdev->pdev->dev,
1086                                 "**** write buffer failed, retry skerr\n");
1087                         skd_send_internal_skspcl(skdev, skspcl,
1088                                                  TEST_UNIT_READY);
1089                 }
1090                 break;
1091
1092         case READ_BUFFER:
1093                 if (status == SAM_STAT_GOOD) {
1094                         if (skd_chk_read_buf(skdev, skspcl) == 0)
1095                                 skd_send_internal_skspcl(skdev, skspcl,
1096                                                          READ_CAPACITY);
1097                         else {
1098                                 dev_err(&skdev->pdev->dev,
1099                                         "*** W/R Buffer mismatch %d ***\n",
1100                                         skdev->connect_retries);
1101                                 if (skdev->connect_retries <
1102                                     SKD_MAX_CONNECT_RETRIES) {
1103                                         skdev->connect_retries++;
1104                                         skd_soft_reset(skdev);
1105                                 } else {
1106                                         dev_err(&skdev->pdev->dev,
1107                                                 "W/R Buffer Connect Error\n");
1108                                         return;
1109                                 }
1110                         }
1111
1112                 } else {
1113                         if (skdev->state == SKD_DRVR_STATE_STOPPING) {
1114                                 dev_dbg(&skdev->pdev->dev,
1115                                         "read buffer failed, not sending any more; state 0x%x\n",
1116                                         skdev->state);
1117                                 return;
1118                         }
1119                         dev_dbg(&skdev->pdev->dev,
1120                                 "**** read buffer failed, retry skerr\n");
1121                         skd_send_internal_skspcl(skdev, skspcl,
1122                                                  TEST_UNIT_READY);
1123                 }
1124                 break;
1125
1126         case READ_CAPACITY:
1127                 skdev->read_cap_is_valid = 0;
1128                 if (status == SAM_STAT_GOOD) {
1129                         skdev->read_cap_last_lba =
1130                                 (buf[0] << 24) | (buf[1] << 16) |
1131                                 (buf[2] << 8) | buf[3];
1132                         skdev->read_cap_blocksize =
1133                                 (buf[4] << 24) | (buf[5] << 16) |
1134                                 (buf[6] << 8) | buf[7];
1135
1136                         dev_dbg(&skdev->pdev->dev, "last lba %d, bs %d\n",
1137                                 skdev->read_cap_last_lba,
1138                                 skdev->read_cap_blocksize);
1139
1140                         set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
1141
1142                         skdev->read_cap_is_valid = 1;
1143
1144                         skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
1145                 } else if ((status == SAM_STAT_CHECK_CONDITION) &&
1146                            (skerr->key == MEDIUM_ERROR)) {
1147                         skdev->read_cap_last_lba = ~0;
1148                         set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
1149                         dev_dbg(&skdev->pdev->dev, "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n");
1150                         skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
1151                 } else {
1152                         dev_dbg(&skdev->pdev->dev, "**** READCAP failed, retry TUR\n");
1153                         skd_send_internal_skspcl(skdev, skspcl,
1154                                                  TEST_UNIT_READY);
1155                 }
1156                 break;
1157
1158         case INQUIRY:
1159                 skdev->inquiry_is_valid = 0;
1160                 if (status == SAM_STAT_GOOD) {
1161                         skdev->inquiry_is_valid = 1;
1162
1163                         for (i = 0; i < 12; i++)
1164                                 skdev->inq_serial_num[i] = buf[i + 4];
1165                         skdev->inq_serial_num[12] = 0;
1166                 }
1167
1168                 if (skd_unquiesce_dev(skdev) < 0)
1169                         dev_dbg(&skdev->pdev->dev, "**** failed to ONLINE device\n");
1170                 /* connection is complete */
1171                 skdev->connect_retries = 0;
1172                 break;
1173
1174         case SYNCHRONIZE_CACHE:
1175                 if (status == SAM_STAT_GOOD)
1176                         skdev->sync_done = 1;
1177                 else
1178                         skdev->sync_done = -1;
1179                 wake_up_interruptible(&skdev->waitq);
1180                 break;
1181
1182         default:
1183                 SKD_ASSERT(!"we didn't send this");
1184         }
1185 }
1186
1187 /*
1188  *****************************************************************************
1189  * FIT MESSAGES
1190  *****************************************************************************
1191  */
1192
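/*
 * Post a FIT message to the device: the doorbell write to FIT_Q_COMMAND
 * carries the message's DMA address with the queue ID and a size class
 * (64/128/256/512 bytes) encoded in the low-order bits.
 */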
1193 static void skd_send_fitmsg(struct skd_device *skdev,
1194                             struct skd_fitmsg_context *skmsg)
1195 {
1196         u64 qcmd;
1197
1198         dev_dbg(&skdev->pdev->dev, "dma address %pad, busy=%d\n",
1199                 &skmsg->mb_dma_address, skd_in_flight(skdev));
1200         dev_dbg(&skdev->pdev->dev, "msg_buf %p\n", skmsg->msg_buf);
1201
1202         qcmd = skmsg->mb_dma_address;
1203         qcmd |= FIT_QCMD_QID_NORMAL;
1204
1205         if (unlikely(skdev->dbg_level > 1)) {
1206                 u8 *bp = (u8 *)skmsg->msg_buf;
1207                 int i;
1208                 for (i = 0; i < skmsg->length; i += 8) {
1209                         dev_dbg(&skdev->pdev->dev, "msg[%2d] %8ph\n", i,
1210                                 &bp[i]);
1211                         if (i == 0)
1212                                 i = 64 - 8;
1213                 }
1214         }
1215
1216         if (skmsg->length > 256)
1217                 qcmd |= FIT_QCMD_MSGSIZE_512;
1218         else if (skmsg->length > 128)
1219                 qcmd |= FIT_QCMD_MSGSIZE_256;
1220         else if (skmsg->length > 64)
1221                 qcmd |= FIT_QCMD_MSGSIZE_128;
1222         else
1223                 /*
1224                  * This makes no sense because the FIT msg header is
1225                  * 64 bytes. If the msg is only 64 bytes long it has
1226                  * no payload.
1227                  */
1228                 qcmd |= FIT_QCMD_MSGSIZE_64;
1229
1230         dma_sync_single_for_device(&skdev->pdev->dev, skmsg->mb_dma_address,
1231                                    skmsg->length, DMA_TO_DEVICE);
1232
1233         /* Make sure skd_msg_buf is written before the doorbell is triggered. */
1234         smp_wmb();
1235
1236         SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
1237 }
1238
1239 static void skd_send_special_fitmsg(struct skd_device *skdev,
1240                                     struct skd_special_context *skspcl)
1241 {
1242         u64 qcmd;
1243
1244         WARN_ON_ONCE(skspcl->req.n_sg != 1);
1245
1246         if (unlikely(skdev->dbg_level > 1)) {
1247                 u8 *bp = (u8 *)skspcl->msg_buf;
1248                 int i;
1249
1250                 for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
1251                         dev_dbg(&skdev->pdev->dev, " spcl[%2d] %8ph\n", i,
1252                                 &bp[i]);
1253                         if (i == 0)
1254                                 i = 64 - 8;
1255                 }
1256
1257                 dev_dbg(&skdev->pdev->dev,
1258                         "skspcl=%p id=%04x sksg_list=%p sksg_dma=%pad\n",
1259                         skspcl, skspcl->req.id, skspcl->req.sksg_list,
1260                         &skspcl->req.sksg_dma_address);
1261                 for (i = 0; i < skspcl->req.n_sg; i++) {
1262                         struct fit_sg_descriptor *sgd =
1263                                 &skspcl->req.sksg_list[i];
1264
1265                         dev_dbg(&skdev->pdev->dev,
1266                                 "  sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
1267                                 i, sgd->byte_count, sgd->control,
1268                                 sgd->host_side_addr, sgd->next_desc_ptr);
1269                 }
1270         }
1271
1272         /*
1273          * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
1274          * and one 64-byte SSDI command.
1275          */
1276         qcmd = skspcl->mb_dma_address;
1277         qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;
1278
1279         dma_sync_single_for_device(&skdev->pdev->dev, skspcl->mb_dma_address,
1280                                    SKD_N_SPECIAL_FITMSG_BYTES, DMA_TO_DEVICE);
1281         dma_sync_single_for_device(&skdev->pdev->dev,
1282                                    skspcl->req.sksg_dma_address,
1283                                    1 * sizeof(struct fit_sg_descriptor),
1284                                    DMA_TO_DEVICE);
1285         dma_sync_single_for_device(&skdev->pdev->dev,
1286                                    skspcl->db_dma_address,
1287                                    skspcl->req.sksg_list[0].byte_count,
1288                                    DMA_BIDIRECTIONAL);
1289
1290         /* Make sure skd_msg_buf is written before the doorbell is triggered. */
1291         smp_wmb();
1292
1293         SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
1294 }
1295
1296 /*
1297  *****************************************************************************
1298  * COMPLETION QUEUE
1299  *****************************************************************************
1300  */
1301
1302 static void skd_complete_other(struct skd_device *skdev,
1303                                struct fit_completion_entry_v1 *skcomp,
1304                                struct fit_comp_error_info *skerr);
1305
1306 struct sns_info {
1307         u8 type;
1308         u8 stat;
1309         u8 key;
1310         u8 asc;
1311         u8 ascq;
1312         u8 mask;
1313         enum skd_check_status_action action;
1314 };
1315
1316 static struct sns_info skd_chkstat_table[] = {
1317         /* Good */
1318         { 0x70, 0x02, RECOVERED_ERROR, 0,    0,    0x1c,
1319           SKD_CHECK_STATUS_REPORT_GOOD },
1320
1321         /* Smart alerts */
1322         { 0x70, 0x02, NO_SENSE,        0x0B, 0x00, 0x1E,        /* warnings */
1323           SKD_CHECK_STATUS_REPORT_SMART_ALERT },
1324         { 0x70, 0x02, NO_SENSE,        0x5D, 0x00, 0x1E,        /* thresholds */
1325           SKD_CHECK_STATUS_REPORT_SMART_ALERT },
1326         { 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F,        /* temperature over trigger */
1327           SKD_CHECK_STATUS_REPORT_SMART_ALERT },
1328
1329         /* Retry (with limits) */
1330         { 0x70, 0x02, 0x0B,            0,    0,    0x1C,        /* This one is for DMA ERROR */
1331           SKD_CHECK_STATUS_REQUEUE_REQUEST },
1332         { 0x70, 0x02, 0x06,            0x0B, 0x00, 0x1E,        /* warnings */
1333           SKD_CHECK_STATUS_REQUEUE_REQUEST },
1334         { 0x70, 0x02, 0x06,            0x5D, 0x00, 0x1E,        /* thresholds */
1335           SKD_CHECK_STATUS_REQUEUE_REQUEST },
1336         { 0x70, 0x02, 0x06,            0x80, 0x30, 0x1F,        /* backup power */
1337           SKD_CHECK_STATUS_REQUEUE_REQUEST },
1338
1339         /* Busy (or about to be) */
1340         { 0x70, 0x02, 0x06,            0x3f, 0x01, 0x1F, /* fw changed */
1341           SKD_CHECK_STATUS_BUSY_IMMINENT },
1342 };
1343
1344 /*
1345  * Look up status and sense data to decide how to handle the error
1346  * from the device.
1347  * mask says which fields must match e.g., mask=0x18 means check
1348  * type and stat, ignore key, asc, ascq.
1349  */
1350
1351 static enum skd_check_status_action
1352 skd_check_status(struct skd_device *skdev,
1353                  u8 cmp_status, struct fit_comp_error_info *skerr)
1354 {
1355         int i;
1356
1357         dev_err(&skdev->pdev->dev, "key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
1358                 skerr->key, skerr->code, skerr->qual, skerr->fruc);
1359
1360         dev_dbg(&skdev->pdev->dev,
1361                 "stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
1362                 skerr->type, cmp_status, skerr->key, skerr->code, skerr->qual,
1363                 skerr->fruc);
1364
1365         /* Does the info match an entry in the good category? */
1366         for (i = 0; i < ARRAY_SIZE(skd_chkstat_table); i++) {
1367                 struct sns_info *sns = &skd_chkstat_table[i];
1368
1369                 if (sns->mask & 0x10)
1370                         if (skerr->type != sns->type)
1371                                 continue;
1372
1373                 if (sns->mask & 0x08)
1374                         if (cmp_status != sns->stat)
1375                                 continue;
1376
1377                 if (sns->mask & 0x04)
1378                         if (skerr->key != sns->key)
1379                                 continue;
1380
1381                 if (sns->mask & 0x02)
1382                         if (skerr->code != sns->asc)
1383                                 continue;
1384
1385                 if (sns->mask & 0x01)
1386                         if (skerr->qual != sns->ascq)
1387                                 continue;
1388
1389                 if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
1390                         dev_err(&skdev->pdev->dev,
1391                                 "SMART Alert: sense key/asc/ascq %02x/%02x/%02x\n",
1392                                 skerr->key, skerr->code, skerr->qual);
1393                 }
1394                 return sns->action;
1395         }
1396
1397         /* No other match, so nonzero status means error,
1398          * zero status means good
1399          */
1400         if (cmp_status) {
1401                 dev_dbg(&skdev->pdev->dev, "status check: error\n");
1402                 return SKD_CHECK_STATUS_REPORT_ERROR;
1403         }
1404
1405         dev_dbg(&skdev->pdev->dev, "status check good default\n");
1406         return SKD_CHECK_STATUS_REPORT_GOOD;
1407 }
1408
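/*
 * Act on the verdict from skd_check_status(): complete the request (good or
 * error), requeue it subject to SKD_MAX_RETRIES, or requeue and quiesce the
 * device when a busy condition is imminent.
 */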
1409 static void skd_resolve_req_exception(struct skd_device *skdev,
1410                                       struct skd_request_context *skreq,
1411                                       struct request *req)
1412 {
1413         u8 cmp_status = skreq->completion.status;
1414
1415         switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
1416         case SKD_CHECK_STATUS_REPORT_GOOD:
1417         case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
1418                 skreq->status = BLK_STS_OK;
1419                 if (likely(!blk_should_fake_timeout(req->q)))
1420                         blk_mq_complete_request(req);
1421                 break;
1422
1423         case SKD_CHECK_STATUS_BUSY_IMMINENT:
1424                 skd_log_skreq(skdev, skreq, "retry(busy)");
1425                 blk_mq_requeue_request(req, true);
1426                 dev_info(&skdev->pdev->dev, "drive BUSY imminent\n");
1427                 skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
1428                 skdev->timer_countdown = SKD_TIMER_MINUTES(20);
1429                 skd_quiesce_dev(skdev);
1430                 break;
1431
1432         case SKD_CHECK_STATUS_REQUEUE_REQUEST:
1433                 if (++skreq->retries < SKD_MAX_RETRIES) {
1434                         skd_log_skreq(skdev, skreq, "retry");
1435                         blk_mq_requeue_request(req, true);
1436                         break;
1437                 }
1438                 fallthrough;
1439
1440         case SKD_CHECK_STATUS_REPORT_ERROR:
1441         default:
1442                 skreq->status = BLK_STS_IOERR;
1443                 if (likely(!blk_should_fake_timeout(req->q)))
1444                         blk_mq_complete_request(req);
1445                 break;
1446         }
1447 }
1448
1449 static void skd_release_skreq(struct skd_device *skdev,
1450                               struct skd_request_context *skreq)
1451 {
1452         /*
1453          * Reclaim the skd_request_context
1454          */
1455         skreq->state = SKD_REQ_STATE_IDLE;
1456 }
1457
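/*
 * Drain new entries from the FIT completion ring.  An entry is new while
 * its cycle bit matches skdev->skcomp_cycle; the cycle count advances each
 * time the 256-entry ring wraps.  Completions for r/w requests are posted
 * back to blk-mq; everything else is routed to skd_complete_other().
 * Returns nonzero if the scan stopped early because @limit entries were
 * processed, so the caller can defer the remainder to the completion
 * worker.  Must be called with skdev->lock held.
 */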
1458 static int skd_isr_completion_posted(struct skd_device *skdev,
1459                                         int limit, int *enqueued)
1460 {
1461         struct fit_completion_entry_v1 *skcmp;
1462         struct fit_comp_error_info *skerr;
1463         u16 req_id;
1464         u32 tag;
1465         u16 hwq = 0;
1466         struct request *rq;
1467         struct skd_request_context *skreq;
1468         u16 cmp_cntxt;
1469         u8 cmp_status;
1470         u8 cmp_cycle;
1471         u32 cmp_bytes;
1472         int rc = 0;
1473         int processed = 0;
1474
1475         lockdep_assert_held(&skdev->lock);
1476
1477         for (;;) {
1478                 SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);
1479
1480                 skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
1481                 cmp_cycle = skcmp->cycle;
1482                 cmp_cntxt = skcmp->tag;
1483                 cmp_status = skcmp->status;
1484                 cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);
1485
1486                 skerr = &skdev->skerr_table[skdev->skcomp_ix];
1487
1488                 dev_dbg(&skdev->pdev->dev,
1489                         "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d busy=%d rbytes=0x%x proto=%d\n",
1490                         skdev->skcomp_cycle, skdev->skcomp_ix, cmp_cycle,
1491                         cmp_cntxt, cmp_status, skd_in_flight(skdev),
1492                         cmp_bytes, skdev->proto_ver);
1493
1494                 if (cmp_cycle != skdev->skcomp_cycle) {
1495                         dev_dbg(&skdev->pdev->dev, "end of completions\n");
1496                         break;
1497                 }
1498                 /*
1499                  * Update the completion queue head index and possibly
1500                  * the completion cycle count. 8-bit wrap-around.
1501                  */
1502                 skdev->skcomp_ix++;
1503                 if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
1504                         skdev->skcomp_ix = 0;
1505                         skdev->skcomp_cycle++;
1506                 }
1507
1508                 /*
1509                  * The command context is a unique 32-bit ID. The low order
1510                  * bits help locate the request. The request is usually a
1511                  * r/w request (see skd_start() above) or a special request.
1512                  */
1513                 req_id = cmp_cntxt;
1514                 tag = req_id & SKD_ID_SLOT_AND_TABLE_MASK;
1515
1516                 /* Is this something other than an r/w request? */
1517                 if (tag >= skdev->num_req_context) {
1518                         /*
1519                          * This is not a completion for a r/w request.
1520                          */
1521                         WARN_ON_ONCE(blk_mq_tag_to_rq(skdev->tag_set.tags[hwq],
1522                                                       tag));
1523                         skd_complete_other(skdev, skcmp, skerr);
1524                         continue;
1525                 }
1526
1527                 rq = blk_mq_tag_to_rq(skdev->tag_set.tags[hwq], tag);
1528                 if (WARN(!rq, "No request for tag %#x -> %#x\n", cmp_cntxt,
1529                          tag))
1530                         continue;
1531                 skreq = blk_mq_rq_to_pdu(rq);
1532
1533                 /*
1534                  * Make sure the request ID for the slot matches.
1535                  */
1536                 if (skreq->id != req_id) {
1537                         dev_err(&skdev->pdev->dev,
1538                                 "Completion mismatch comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
1539                                 req_id, skreq->id, cmp_cntxt);
1540
1541                         continue;
1542                 }
1543
1544                 SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY);
1545
1546                 skreq->completion = *skcmp;
1547                 if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) {
1548                         skreq->err_info = *skerr;
1549                         skd_log_check_status(skdev, cmp_status, skerr->key,
1550                                              skerr->code, skerr->qual,
1551                                              skerr->fruc);
1552                 }
1553                 /* Release DMA resources for the request. */
1554                 if (skreq->n_sg > 0)
1555                         skd_postop_sg_list(skdev, skreq);
1556
1557                 skd_release_skreq(skdev, skreq);
1558
1559                 /*
1560                  * Capture the outcome and post it back to the native request.
1561                  */
1562                 if (likely(cmp_status == SAM_STAT_GOOD)) {
1563                         skreq->status = BLK_STS_OK;
1564                         if (likely(!blk_should_fake_timeout(rq->q)))
1565                                 blk_mq_complete_request(rq);
1566                 } else {
1567                         skd_resolve_req_exception(skdev, skreq, rq);
1568                 }
1569
1570                 /* skd_isr_comp_limit equal to zero means no limit */
1571                 if (limit) {
1572                         if (++processed >= limit) {
1573                                 rc = 1;
1574                                 break;
1575                         }
1576                 }
1577         }
1578
1579         if (skdev->state == SKD_DRVR_STATE_PAUSING &&
1580             skd_in_flight(skdev) == 0) {
1581                 skdev->state = SKD_DRVR_STATE_PAUSED;
1582                 wake_up_interruptible(&skdev->waitq);
1583         }
1584
1585         return rc;
1586 }
1587
1588 static void skd_complete_other(struct skd_device *skdev,
1589                                struct fit_completion_entry_v1 *skcomp,
1590                                struct fit_comp_error_info *skerr)
1591 {
1592         u32 req_id = 0;
1593         u32 req_table;
1594         u32 req_slot;
1595         struct skd_special_context *skspcl;
1596
1597         lockdep_assert_held(&skdev->lock);
1598
1599         req_id = skcomp->tag;
1600         req_table = req_id & SKD_ID_TABLE_MASK;
1601         req_slot = req_id & SKD_ID_SLOT_MASK;
1602
1603         dev_dbg(&skdev->pdev->dev, "table=0x%x id=0x%x slot=%d\n", req_table,
1604                 req_id, req_slot);
1605
1606         /*
1607          * Based on the request ID, determine how to dispatch this completion.
1608          * This switch/case handles the good cases and forwards the
1609          * completion entry.  Errors are reported below the switch.
1610          */
1611         switch (req_table) {
1612         case SKD_ID_RW_REQUEST:
1613                 /*
1614                  * The caller, skd_isr_completion_posted() above,
1615                  * handles r/w requests. The only way we get here
1616                  * is if the req_slot is out of bounds.
1617                  */
1618                 break;
1619
1620         case SKD_ID_INTERNAL:
1621                 if (req_slot == 0) {
1622                         skspcl = &skdev->internal_skspcl;
1623                         if (skspcl->req.id == req_id &&
1624                             skspcl->req.state == SKD_REQ_STATE_BUSY) {
1625                                 skd_complete_internal(skdev,
1626                                                       skcomp, skerr, skspcl);
1627                                 return;
1628                         }
1629                 }
1630                 break;
1631
1632         case SKD_ID_FIT_MSG:
1633                 /*
1634                  * These IDs should never appear in a completion record.
1635                  */
1636                 break;
1637
1638         default:
1639                 /*
1640                  * These IDs should never appear anywhere.
1641                  */
1642                 break;
1643         }
1644
1645         /*
1646          * If we get here it is a bad or stale ID.
1647          */
1648 }
1649
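/*
 * Reset the host-side view of the completion ring: zero the table and
 * expect the next entry at index 0 with cycle bit 1.  Called when the
 * completion queue address is handed to the device.
 */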
1650 static void skd_reset_skcomp(struct skd_device *skdev)
1651 {
1652         memset(skdev->skcomp_table, 0, SKD_SKCOMP_SIZE);
1653
1654         skdev->skcomp_ix = 0;
1655         skdev->skcomp_cycle = 1;
1656 }
1657
1658 /*
1659  *****************************************************************************
1660  * INTERRUPTS
1661  *****************************************************************************
1662  */
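/*
 * Work item used when interrupt-time completion processing hits
 * skd_isr_comp_limit: drains the rest of the completion queue with no
 * limit and restarts the block queue.
 */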
1663 static void skd_completion_worker(struct work_struct *work)
1664 {
1665         struct skd_device *skdev =
1666                 container_of(work, struct skd_device, completion_worker);
1667         unsigned long flags;
1668         int flush_enqueued = 0;
1669
1670         spin_lock_irqsave(&skdev->lock, flags);
1671
1672         /*
1673          * Pass in limit=0, which means no limit:
1674          * process everything in the completion queue.
1675          */
1676         skd_isr_completion_posted(skdev, 0, &flush_enqueued);
1677         schedule_work(&skdev->start_queue);
1678
1679         spin_unlock_irqrestore(&skdev->lock, flags);
1680 }
1681
1682 static void skd_isr_msg_from_dev(struct skd_device *skdev);
1683
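/*
 * Legacy/MSI interrupt handler.  Reads and acknowledges FIT_INT_STATUS_HOST
 * in a loop, dispatching completion, firmware-state, and message interrupts
 * until no acknowledgeable bits remain.  Returns IRQ_NONE (0) if the device
 * never raised anything we handle.  Deferred completion processing and
 * queue restarts are pushed to work items outside the loop.
 */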
1684 static irqreturn_t
1685 skd_isr(int irq, void *ptr)
1686 {
1687         struct skd_device *skdev = ptr;
1688         u32 intstat;
1689         u32 ack;
1690         int rc = 0;
1691         int deferred = 0;
1692         int flush_enqueued = 0;
1693
1694         spin_lock(&skdev->lock);
1695
1696         for (;;) {
1697                 intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);
1698
1699                 ack = FIT_INT_DEF_MASK;
1700                 ack &= intstat;
1701
1702                 dev_dbg(&skdev->pdev->dev, "intstat=0x%x ack=0x%x\n", intstat,
1703                         ack);
1704
1705                 /* As long as an interrupt is pending on the device, keep
1706                  * looping.  When none remain, get out; if we never did any
1707                  * processing, defer to the completion worker instead.
1708                  */
1709                 if (ack == 0) {
1710                         /* No interrupt pending on the device; still defer
1711                          * completion processing if the device is online.
1712                          */
1713                         if (rc == 0)
1714                                 if (likely(skdev->state
1715                                         == SKD_DRVR_STATE_ONLINE))
1716                                         deferred = 1;
1717                         break;
1718                 }
1719
1720                 rc = IRQ_HANDLED;
1721
1722                 SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);
1723
1724                 if (likely((skdev->state != SKD_DRVR_STATE_LOAD) &&
1725                            (skdev->state != SKD_DRVR_STATE_STOPPING))) {
1726                         if (intstat & FIT_ISH_COMPLETION_POSTED) {
1727                                 /*
1728                                  * If we have already deferred completion
1729                                  * processing, don't bother running it again
1730                                  */
1731                                 if (deferred == 0)
1732                                         deferred =
1733                                                 skd_isr_completion_posted(skdev,
1734                                                 skd_isr_comp_limit, &flush_enqueued);
1735                         }
1736
1737                         if (intstat & FIT_ISH_FW_STATE_CHANGE) {
1738                                 skd_isr_fwstate(skdev);
1739                                 if (skdev->state == SKD_DRVR_STATE_FAULT ||
1740                                     skdev->state ==
1741                                     SKD_DRVR_STATE_DISAPPEARED) {
1742                                         spin_unlock(&skdev->lock);
1743                                         return rc;
1744                                 }
1745                         }
1746
1747                         if (intstat & FIT_ISH_MSG_FROM_DEV)
1748                                 skd_isr_msg_from_dev(skdev);
1749                 }
1750         }
1751
1752         if (unlikely(flush_enqueued))
1753                 schedule_work(&skdev->start_queue);
1754
1755         if (deferred)
1756                 schedule_work(&skdev->completion_worker);
1757         else if (!flush_enqueued)
1758                 schedule_work(&skdev->start_queue);
1759
1760         spin_unlock(&skdev->lock);
1761
1762         return rc;
1763 }
1764
1765 static void skd_drive_fault(struct skd_device *skdev)
1766 {
1767         skdev->state = SKD_DRVR_STATE_FAULT;
1768         dev_err(&skdev->pdev->dev, "Drive FAULT\n");
1769 }
1770
1771 static void skd_drive_disappeared(struct skd_device *skdev)
1772 {
1773         skdev->state = SKD_DRVR_STATE_DISAPPEARED;
1774         dev_err(&skdev->pdev->dev, "Drive DISAPPEARED\n");
1775 }
1776
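/*
 * Handle a firmware state-change interrupt: sample the drive state from
 * FIT_STATUS and advance the driver state machine to match, kicking off
 * soft resets, request recovery, or device-data refresh as required.
 */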
1777 static void skd_isr_fwstate(struct skd_device *skdev)
1778 {
1779         u32 sense;
1780         u32 state;
1781         u32 mtd;
1782         int prev_driver_state = skdev->state;
1783
1784         sense = SKD_READL(skdev, FIT_STATUS);
1785         state = sense & FIT_SR_DRIVE_STATE_MASK;
1786
1787         dev_err(&skdev->pdev->dev, "s1120 state %s(%d)=>%s(%d)\n",
1788                 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
1789                 skd_drive_state_to_str(state), state);
1790
1791         skdev->drive_state = state;
1792
1793         switch (skdev->drive_state) {
1794         case FIT_SR_DRIVE_INIT:
1795                 if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
1796                         skd_disable_interrupts(skdev);
1797                         break;
1798                 }
1799                 if (skdev->state == SKD_DRVR_STATE_RESTARTING)
1800                         skd_recover_requests(skdev);
1801                 if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
1802                         skdev->timer_countdown = SKD_STARTING_TIMO;
1803                         skdev->state = SKD_DRVR_STATE_STARTING;
1804                         skd_soft_reset(skdev);
1805                         break;
1806                 }
1807                 mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0);
1808                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
1809                 skdev->last_mtd = mtd;
1810                 break;
1811
1812         case FIT_SR_DRIVE_ONLINE:
1813                 skdev->cur_max_queue_depth = skd_max_queue_depth;
1814                 if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth)
1815                         skdev->cur_max_queue_depth = skdev->dev_max_queue_depth;
1816
1817                 skdev->queue_low_water_mark =
1818                         skdev->cur_max_queue_depth * 2 / 3 + 1;
1819                 if (skdev->queue_low_water_mark < 1)
1820                         skdev->queue_low_water_mark = 1;
1821                 dev_info(&skdev->pdev->dev,
1822                          "Queue depth limit=%d dev=%d lowat=%d\n",
1823                          skdev->cur_max_queue_depth,
1824                          skdev->dev_max_queue_depth,
1825                          skdev->queue_low_water_mark);
1826
1827                 skd_refresh_device_data(skdev);
1828                 break;
1829
1830         case FIT_SR_DRIVE_BUSY:
1831                 skdev->state = SKD_DRVR_STATE_BUSY;
1832                 skdev->timer_countdown = SKD_BUSY_TIMO;
1833                 skd_quiesce_dev(skdev);
1834                 break;
1835         case FIT_SR_DRIVE_BUSY_SANITIZE:
1836                 /* set the timer for 3 seconds; we'll abort any unfinished
1837                  * commands after it expires
1838                  */
1839                 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
1840                 skdev->timer_countdown = SKD_TIMER_SECONDS(3);
1841                 schedule_work(&skdev->start_queue);
1842                 break;
1843         case FIT_SR_DRIVE_BUSY_ERASE:
1844                 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
1845                 skdev->timer_countdown = SKD_BUSY_TIMO;
1846                 break;
1847         case FIT_SR_DRIVE_OFFLINE:
1848                 skdev->state = SKD_DRVR_STATE_IDLE;
1849                 break;
1850         case FIT_SR_DRIVE_SOFT_RESET:
1851                 switch (skdev->state) {
1852                 case SKD_DRVR_STATE_STARTING:
1853                 case SKD_DRVR_STATE_RESTARTING:
1854                         /* Expected by a caller of skd_soft_reset() */
1855                         break;
1856                 default:
1857                         skdev->state = SKD_DRVR_STATE_RESTARTING;
1858                         break;
1859                 }
1860                 break;
1861         case FIT_SR_DRIVE_FW_BOOTING:
1862                 dev_dbg(&skdev->pdev->dev, "ISR FIT_SR_DRIVE_FW_BOOTING\n");
1863                 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
1864                 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
1865                 break;
1866
1867         case FIT_SR_DRIVE_DEGRADED:
1868         case FIT_SR_PCIE_LINK_DOWN:
1869         case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
1870                 break;
1871
1872         case FIT_SR_DRIVE_FAULT:
1873                 skd_drive_fault(skdev);
1874                 skd_recover_requests(skdev);
1875                 schedule_work(&skdev->start_queue);
1876                 break;
1877
1878         /* PCIe bus returned all Fs? */
1879         case 0xFF:
1880                 dev_info(&skdev->pdev->dev, "state=0x%x sense=0x%x\n", state,
1881                          sense);
1882                 skd_drive_disappeared(skdev);
1883                 skd_recover_requests(skdev);
1884                 schedule_work(&skdev->start_queue);
1885                 break;
1886         default:
1887                 /*
1888                  * Unknown FW state. Wait for a state we recognize.
1889                  */
1890                 break;
1891         }
1892         dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n",
1893                 skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
1894                 skd_skdev_state_to_str(skdev->state), skdev->state);
1895 }
1896
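/*
 * blk_mq_tagset_busy_iter() callback used by skd_recover_requests(): any
 * request still marked busy has its DMA resources released and is
 * completed with BLK_STS_IOERR.
 */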
1897 static bool skd_recover_request(struct request *req, void *data, bool reserved)
1898 {
1899         struct skd_device *const skdev = data;
1900         struct skd_request_context *skreq = blk_mq_rq_to_pdu(req);
1901
1902         if (skreq->state != SKD_REQ_STATE_BUSY)
1903                 return true;
1904
1905         skd_log_skreq(skdev, skreq, "recover");
1906
1907         /* Release DMA resources for the request. */
1908         if (skreq->n_sg > 0)
1909                 skd_postop_sg_list(skdev, skreq);
1910
1911         skreq->state = SKD_REQ_STATE_IDLE;
1912         skreq->status = BLK_STS_IOERR;
1913         blk_mq_complete_request(req);
1914         return true;
1915 }
1916
1917 static void skd_recover_requests(struct skd_device *skdev)
1918 {
1919         blk_mq_tagset_busy_iter(&skdev->tag_set, skd_recover_request, skdev);
1920 }
1921
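/*
 * Process a message-from-device interrupt.  During bring-up the device and
 * driver walk a fixed handshake: FITFW_INIT (protocol check) ->
 * GET_CMDQ_DEPTH -> SET_COMPQ_DEPTH -> SET_COMPQ_ADDR -> CMD_LOG_HOST_ID ->
 * CMD_LOG_TIME_STAMP_LO/HI -> ARM_QUEUE.  Each ack from the device triggers
 * the next message-to-device; acks for messages we did not send are ignored.
 */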
1922 static void skd_isr_msg_from_dev(struct skd_device *skdev)
1923 {
1924         u32 mfd;
1925         u32 mtd;
1926         u32 data;
1927
1928         mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
1929
1930         dev_dbg(&skdev->pdev->dev, "mfd=0x%x last_mtd=0x%x\n", mfd,
1931                 skdev->last_mtd);
1932
1933         /* ignore any mtd that is an ack for something we didn't send */
1934         if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
1935                 return;
1936
1937         switch (FIT_MXD_TYPE(mfd)) {
1938         case FIT_MTD_FITFW_INIT:
1939                 skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);
1940
1941                 if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
1942                         dev_err(&skdev->pdev->dev, "protocol mismatch\n");
1943                         dev_err(&skdev->pdev->dev, "  got=%d support=%d\n",
1944                                 skdev->proto_ver, FIT_PROTOCOL_VERSION_1);
1945                         dev_err(&skdev->pdev->dev, "  please upgrade driver\n");
1946                         skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
1947                         skd_soft_reset(skdev);
1948                         break;
1949                 }
1950                 mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0);
1951                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
1952                 skdev->last_mtd = mtd;
1953                 break;
1954
1955         case FIT_MTD_GET_CMDQ_DEPTH:
1956                 skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd);
1957                 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0,
1958                                    SKD_N_COMPLETION_ENTRY);
1959                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
1960                 skdev->last_mtd = mtd;
1961                 break;
1962
1963         case FIT_MTD_SET_COMPQ_DEPTH:
1964                 SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG);
1965                 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0);
1966                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
1967                 skdev->last_mtd = mtd;
1968                 break;
1969
1970         case FIT_MTD_SET_COMPQ_ADDR:
1971                 skd_reset_skcomp(skdev);
1972                 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno);
1973                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
1974                 skdev->last_mtd = mtd;
1975                 break;
1976
1977         case FIT_MTD_CMD_LOG_HOST_ID:
1978                 /* hardware interface overflows in y2106 */
1979                 skdev->connect_time_stamp = (u32)ktime_get_real_seconds();
1980                 data = skdev->connect_time_stamp & 0xFFFF;
1981                 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data);
1982                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
1983                 skdev->last_mtd = mtd;
1984                 break;
1985
1986         case FIT_MTD_CMD_LOG_TIME_STAMP_LO:
1987                 skdev->drive_jiffies = FIT_MXD_DATA(mfd);
1988                 data = (skdev->connect_time_stamp >> 16) & 0xFFFF;
1989                 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data);
1990                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
1991                 skdev->last_mtd = mtd;
1992                 break;
1993
1994         case FIT_MTD_CMD_LOG_TIME_STAMP_HI:
1995                 skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16);
1996                 mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0);
1997                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
1998                 skdev->last_mtd = mtd;
1999
2000                 dev_err(&skdev->pdev->dev, "Time sync driver=0x%x device=0x%x\n",
2001                         skdev->connect_time_stamp, skdev->drive_jiffies);
2002                 break;
2003
2004         case FIT_MTD_ARM_QUEUE:
2005                 skdev->last_mtd = 0;
2006                 /*
2007                  * State should be, or soon will be, FIT_SR_DRIVE_ONLINE.
2008                  */
2009                 break;
2010
2011         default:
2012                 break;
2013         }
2014 }
2015
2016 static void skd_disable_interrupts(struct skd_device *skdev)
2017 {
2018         u32 sense;
2019
2020         sense = SKD_READL(skdev, FIT_CONTROL);
2021         sense &= ~FIT_CR_ENABLE_INTERRUPTS;
2022         SKD_WRITEL(skdev, sense, FIT_CONTROL);
2023         dev_dbg(&skdev->pdev->dev, "sense 0x%x\n", sense);
2024
2025         /* Note that all 1s are written.  A 1 bit means
2026          * disable, a 0 means enable.
2027          */
2028         SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
2029 }
2030
2031 static void skd_enable_interrupts(struct skd_device *skdev)
2032 {
2033         u32 val;
2034
2035         /* unmask interrupts first */
2036         val = FIT_ISH_FW_STATE_CHANGE +
2037               FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV;
2038
2039         /* Note that the complement of the mask is written. A 1 bit means
2040          * disable, a 0 means enable. */
2041         SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
2042         dev_dbg(&skdev->pdev->dev, "interrupt mask=0x%x\n", ~val);
2043
2044         val = SKD_READL(skdev, FIT_CONTROL);
2045         val |= FIT_CR_ENABLE_INTERRUPTS;
2046         dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);
2047         SKD_WRITEL(skdev, val, FIT_CONTROL);
2048 }
2049
2050 /*
2051  *****************************************************************************
2052  * START, STOP, RESTART, QUIESCE, UNQUIESCE
2053  *****************************************************************************
2054  */
2055
2056 static void skd_soft_reset(struct skd_device *skdev)
2057 {
2058         u32 val;
2059
2060         val = SKD_READL(skdev, FIT_CONTROL);
2061         val |= FIT_CR_SOFT_RESET;
2062         dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);
2063         SKD_WRITEL(skdev, val, FIT_CONTROL);
2064 }
2065
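/*
 * Bring the device up: acknowledge any stale interrupts, sample the
 * initial drive state, enable interrupts, and then act on that state.
 * In the common case a soft reset is issued so the FIT handshake in
 * skd_isr_msg_from_dev() can run; fault/disappeared states start the
 * queue so pending requests can be failed rather than left hanging.
 */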
2066 static void skd_start_device(struct skd_device *skdev)
2067 {
2068         unsigned long flags;
2069         u32 sense;
2070         u32 state;
2071
2072         spin_lock_irqsave(&skdev->lock, flags);
2073
2074         /* ack all ghost interrupts */
2075         SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
2076
2077         sense = SKD_READL(skdev, FIT_STATUS);
2078
2079         dev_dbg(&skdev->pdev->dev, "initial status=0x%x\n", sense);
2080
2081         state = sense & FIT_SR_DRIVE_STATE_MASK;
2082         skdev->drive_state = state;
2083         skdev->last_mtd = 0;
2084
2085         skdev->state = SKD_DRVR_STATE_STARTING;
2086         skdev->timer_countdown = SKD_STARTING_TIMO;
2087
2088         skd_enable_interrupts(skdev);
2089
2090         switch (skdev->drive_state) {
2091         case FIT_SR_DRIVE_OFFLINE:
2092                 dev_err(&skdev->pdev->dev, "Drive offline...\n");
2093                 break;
2094
2095         case FIT_SR_DRIVE_FW_BOOTING:
2096                 dev_dbg(&skdev->pdev->dev, "FIT_SR_DRIVE_FW_BOOTING\n");
2097                 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
2098                 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
2099                 break;
2100
2101         case FIT_SR_DRIVE_BUSY_SANITIZE:
2102                 dev_info(&skdev->pdev->dev, "Start: BUSY_SANITIZE\n");
2103                 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
2104                 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
2105                 break;
2106
2107         case FIT_SR_DRIVE_BUSY_ERASE:
2108                 dev_info(&skdev->pdev->dev, "Start: BUSY_ERASE\n");
2109                 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
2110                 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
2111                 break;
2112
2113         case FIT_SR_DRIVE_INIT:
2114         case FIT_SR_DRIVE_ONLINE:
2115                 skd_soft_reset(skdev);
2116                 break;
2117
2118         case FIT_SR_DRIVE_BUSY:
2119                 dev_err(&skdev->pdev->dev, "Drive Busy...\n");
2120                 skdev->state = SKD_DRVR_STATE_BUSY;
2121                 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
2122                 break;
2123
2124         case FIT_SR_DRIVE_SOFT_RESET:
2125                 dev_err(&skdev->pdev->dev, "drive soft reset in prog\n");
2126                 break;
2127
2128         case FIT_SR_DRIVE_FAULT:
2129                 /* Fault state is bad: a soft reset won't clear it.
2130                  * A hard reset might, but it's unclear whether the device
2131                  * supports one.  For now, just fault so the system doesn't hang.
2132                  */
2133                 skd_drive_fault(skdev);
2134                 /* start the queue so we can respond with errors to requests */
2135                 dev_dbg(&skdev->pdev->dev, "starting queue\n");
2136                 schedule_work(&skdev->start_queue);
2137                 skdev->gendisk_on = -1;
2138                 wake_up_interruptible(&skdev->waitq);
2139                 break;
2140
2141         case 0xFF:
2142                 /* Most likely the device isn't there or isn't responding
2143                  * to the BAR1 addresses. */
2144                 skd_drive_disappeared(skdev);
2145                 /* start the queue so we can respond with errors to requests */
2146                 dev_dbg(&skdev->pdev->dev,
2147                         "starting queue to error-out reqs\n");
2148                 schedule_work(&skdev->start_queue);
2149                 skdev->gendisk_on = -1;
2150                 wake_up_interruptible(&skdev->waitq);
2151                 break;
2152
2153         default:
2154                 dev_err(&skdev->pdev->dev, "Start: unknown state %x\n",
2155                         skdev->drive_state);
2156                 break;
2157         }
2158
2159         state = SKD_READL(skdev, FIT_CONTROL);
2160         dev_dbg(&skdev->pdev->dev, "FIT Control Status=0x%x\n", state);
2161
2162         state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
2163         dev_dbg(&skdev->pdev->dev, "Intr Status=0x%x\n", state);
2164
2165         state = SKD_READL(skdev, FIT_INT_MASK_HOST);
2166         dev_dbg(&skdev->pdev->dev, "Intr Mask=0x%x\n", state);
2167
2168         state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
2169         dev_dbg(&skdev->pdev->dev, "Msg from Dev=0x%x\n", state);
2170
2171         state = SKD_READL(skdev, FIT_HW_VERSION);
2172         dev_dbg(&skdev->pdev->dev, "HW version=0x%x\n", state);
2173
2174         spin_unlock_irqrestore(&skdev->lock, flags);
2175 }
2176
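/*
 * Orderly shutdown: if the device is online, issue a SYNCHRONIZE CACHE
 * through the internal special request and wait up to 10 seconds for it,
 * then disable interrupts, soft reset the device, and poll up to one
 * second for it to return to FIT_SR_DRIVE_INIT.
 */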
2177 static void skd_stop_device(struct skd_device *skdev)
2178 {
2179         unsigned long flags;
2180         struct skd_special_context *skspcl = &skdev->internal_skspcl;
2181         u32 dev_state;
2182         int i;
2183
2184         spin_lock_irqsave(&skdev->lock, flags);
2185
2186         if (skdev->state != SKD_DRVR_STATE_ONLINE) {
2187                 dev_err(&skdev->pdev->dev, "%s not online no sync\n", __func__);
2188                 goto stop_out;
2189         }
2190
2191         if (skspcl->req.state != SKD_REQ_STATE_IDLE) {
2192                 dev_err(&skdev->pdev->dev, "%s no special\n", __func__);
2193                 goto stop_out;
2194         }
2195
2196         skdev->state = SKD_DRVR_STATE_SYNCING;
2197         skdev->sync_done = 0;
2198
2199         skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);
2200
2201         spin_unlock_irqrestore(&skdev->lock, flags);
2202
2203         wait_event_interruptible_timeout(skdev->waitq,
2204                                          (skdev->sync_done), (10 * HZ));
2205
2206         spin_lock_irqsave(&skdev->lock, flags);
2207
2208         switch (skdev->sync_done) {
2209         case 0:
2210                 dev_err(&skdev->pdev->dev, "%s no sync\n", __func__);
2211                 break;
2212         case 1:
2213                 dev_err(&skdev->pdev->dev, "%s sync done\n", __func__);
2214                 break;
2215         default:
2216                 dev_err(&skdev->pdev->dev, "%s sync error\n", __func__);
2217         }
2218
2219 stop_out:
2220         skdev->state = SKD_DRVR_STATE_STOPPING;
2221         spin_unlock_irqrestore(&skdev->lock, flags);
2222
2223         skd_kill_timer(skdev);
2224
2225         spin_lock_irqsave(&skdev->lock, flags);
2226         skd_disable_interrupts(skdev);
2227
2228         /* ensure all ints on device are cleared */
2229         /* soft reset the device to unload with a clean slate */
2230         SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
2231         SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);
2232
2233         spin_unlock_irqrestore(&skdev->lock, flags);
2234
2235         /* poll every 100ms, 1 second timeout */
2236         for (i = 0; i < 10; i++) {
2237                 dev_state =
2238                         SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
2239                 if (dev_state == FIT_SR_DRIVE_INIT)
2240                         break;
2241                 set_current_state(TASK_INTERRUPTIBLE);
2242                 schedule_timeout(msecs_to_jiffies(100));
2243         }
2244
2245         if (dev_state != FIT_SR_DRIVE_INIT)
2246                 dev_err(&skdev->pdev->dev, "%s state error 0x%02x\n", __func__,
2247                         dev_state);
2248 }
2249
2250 /* assume spinlock is held */
2251 static void skd_restart_device(struct skd_device *skdev)
2252 {
2253         u32 state;
2254
2255         /* ack all ghost interrupts */
2256         SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
2257
2258         state = SKD_READL(skdev, FIT_STATUS);
2259
2260         dev_dbg(&skdev->pdev->dev, "drive status=0x%x\n", state);
2261
2262         state &= FIT_SR_DRIVE_STATE_MASK;
2263         skdev->drive_state = state;
2264         skdev->last_mtd = 0;
2265
2266         skdev->state = SKD_DRVR_STATE_RESTARTING;
2267         skdev->timer_countdown = SKD_RESTARTING_TIMO;
2268
2269         skd_soft_reset(skdev);
2270 }
2271
2272 /* assume spinlock is held */
2273 static int skd_quiesce_dev(struct skd_device *skdev)
2274 {
2275         int rc = 0;
2276
2277         switch (skdev->state) {
2278         case SKD_DRVR_STATE_BUSY:
2279         case SKD_DRVR_STATE_BUSY_IMMINENT:
2280                 dev_dbg(&skdev->pdev->dev, "stopping queue\n");
2281                 blk_mq_stop_hw_queues(skdev->queue);
2282                 break;
2283         case SKD_DRVR_STATE_ONLINE:
2284         case SKD_DRVR_STATE_STOPPING:
2285         case SKD_DRVR_STATE_SYNCING:
2286         case SKD_DRVR_STATE_PAUSING:
2287         case SKD_DRVR_STATE_PAUSED:
2288         case SKD_DRVR_STATE_STARTING:
2289         case SKD_DRVR_STATE_RESTARTING:
2290         case SKD_DRVR_STATE_RESUMING:
2291         default:
2292                 rc = -EINVAL;
2293                 dev_dbg(&skdev->pdev->dev, "state [%d] not implemented\n",
2294                         skdev->state);
2295         }
2296         return rc;
2297 }
2298
2299 /* assume spinlock is held */
2300 static int skd_unquiesce_dev(struct skd_device *skdev)
2301 {
2302         int prev_driver_state = skdev->state;
2303
2304         skd_log_skdev(skdev, "unquiesce");
2305         if (skdev->state == SKD_DRVR_STATE_ONLINE) {
2306                 dev_dbg(&skdev->pdev->dev, "**** device already ONLINE\n");
2307                 return 0;
2308         }
2309         if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
2310                 /*
2311                  * If there has been a state change to something other than
2312                  * ONLINE, we will rely on the controller state change
2313                  * to come back online and restart the queue.
2314                  * The BUSY state means that the driver is ready to
2315                  * continue normal processing but is waiting for the
2316                  * controller to become available.
2317                  */
2318                 skdev->state = SKD_DRVR_STATE_BUSY;
2319                 dev_dbg(&skdev->pdev->dev, "drive BUSY state\n");
2320                 return 0;
2321         }
2322
2323         /*
2324          * The drive has just come online; the driver is either starting up,
2325          * paused performing a task, or busy waiting for hardware.
2326          */
2327         switch (skdev->state) {
2328         case SKD_DRVR_STATE_PAUSED:
2329         case SKD_DRVR_STATE_BUSY:
2330         case SKD_DRVR_STATE_BUSY_IMMINENT:
2331         case SKD_DRVR_STATE_BUSY_ERASE:
2332         case SKD_DRVR_STATE_STARTING:
2333         case SKD_DRVR_STATE_RESTARTING:
2334         case SKD_DRVR_STATE_FAULT:
2335         case SKD_DRVR_STATE_IDLE:
2336         case SKD_DRVR_STATE_LOAD:
2337                 skdev->state = SKD_DRVR_STATE_ONLINE;
2338                 dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n",
2339                         skd_skdev_state_to_str(prev_driver_state),
2340                         prev_driver_state, skd_skdev_state_to_str(skdev->state),
2341                         skdev->state);
2342                 dev_dbg(&skdev->pdev->dev,
2343                         "**** device ONLINE...starting block queue\n");
2344                 dev_dbg(&skdev->pdev->dev, "starting queue\n");
2345                 dev_info(&skdev->pdev->dev, "STEC s1120 ONLINE\n");
2346                 schedule_work(&skdev->start_queue);
2347                 skdev->gendisk_on = 1;
2348                 wake_up_interruptible(&skdev->waitq);
2349                 break;
2350
2351         case SKD_DRVR_STATE_DISAPPEARED:
2352         default:
2353                 dev_dbg(&skdev->pdev->dev,
2354                         "**** driver state %d, not implemented\n",
2355                         skdev->state);
2356                 return -EBUSY;
2357         }
2358         return 0;
2359 }
2360
2361 /*
2362  *****************************************************************************
2363  * PCIe MSI/MSI-X INTERRUPT HANDLERS
2364  *****************************************************************************
2365  */
2366
2367 static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data)
2368 {
2369         struct skd_device *skdev = skd_host_data;
2370         unsigned long flags;
2371
2372         spin_lock_irqsave(&skdev->lock, flags);
2373         dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
2374                 SKD_READL(skdev, FIT_INT_STATUS_HOST));
2375         dev_err(&skdev->pdev->dev, "MSIX reserved irq %d = 0x%x\n", irq,
2376                 SKD_READL(skdev, FIT_INT_STATUS_HOST));
2377         SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST);
2378         spin_unlock_irqrestore(&skdev->lock, flags);
2379         return IRQ_HANDLED;
2380 }
2381
2382 static irqreturn_t skd_statec_isr(int irq, void *skd_host_data)
2383 {
2384         struct skd_device *skdev = skd_host_data;
2385         unsigned long flags;
2386
2387         spin_lock_irqsave(&skdev->lock, flags);
2388         dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
2389                 SKD_READL(skdev, FIT_INT_STATUS_HOST));
2390         SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST);
2391         skd_isr_fwstate(skdev);
2392         spin_unlock_irqrestore(&skdev->lock, flags);
2393         return IRQ_HANDLED;
2394 }
2395
2396 static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
2397 {
2398         struct skd_device *skdev = skd_host_data;
2399         unsigned long flags;
2400         int flush_enqueued = 0;
2401         int deferred;
2402
2403         spin_lock_irqsave(&skdev->lock, flags);
2404         dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
2405                 SKD_READL(skdev, FIT_INT_STATUS_HOST));
2406         SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
2407         deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
2408                                                 &flush_enqueued);
2409         if (flush_enqueued)
2410                 schedule_work(&skdev->start_queue);
2411
2412         if (deferred)
2413                 schedule_work(&skdev->completion_worker);
2414         else if (!flush_enqueued)
2415                 schedule_work(&skdev->start_queue);
2416
2417         spin_unlock_irqrestore(&skdev->lock, flags);
2418
2419         return IRQ_HANDLED;
2420 }
2421
2422 static irqreturn_t skd_msg_isr(int irq, void *skd_host_data)
2423 {
2424         struct skd_device *skdev = skd_host_data;
2425         unsigned long flags;
2426
2427         spin_lock_irqsave(&skdev->lock, flags);
2428         dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
2429                 SKD_READL(skdev, FIT_INT_STATUS_HOST));
2430         SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST);
2431         skd_isr_msg_from_dev(skdev);
2432         spin_unlock_irqrestore(&skdev->lock, flags);
2433         return IRQ_HANDLED;
2434 }
2435
2436 static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
2437 {
2438         struct skd_device *skdev = skd_host_data;
2439         unsigned long flags;
2440
2441         spin_lock_irqsave(&skdev->lock, flags);
2442         dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
2443                 SKD_READL(skdev, FIT_INT_STATUS_HOST));
2444         SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST);
2445         spin_unlock_irqrestore(&skdev->lock, flags);
2446         return IRQ_HANDLED;
2447 }
2448
2449 /*
2450  *****************************************************************************
2451  * PCIe MSI/MSI-X SETUP
2452  *****************************************************************************
2453  */
2454
2455 struct skd_msix_entry {
2456         char isr_name[30];
2457 };
2458
2459 struct skd_init_msix_entry {
2460         const char *name;
2461         irq_handler_t handler;
2462 };
2463
2464 #define SKD_MAX_MSIX_COUNT              13
2465 #define SKD_MIN_MSIX_COUNT              7
2466 #define SKD_BASE_MSIX_IRQ               4
2467
2468 static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = {
2469         { "(DMA 0)",        skd_reserved_isr },
2470         { "(DMA 1)",        skd_reserved_isr },
2471         { "(DMA 2)",        skd_reserved_isr },
2472         { "(DMA 3)",        skd_reserved_isr },
2473         { "(State Change)", skd_statec_isr   },
2474         { "(COMPL_Q)",      skd_comp_q       },
2475         { "(MSG)",          skd_msg_isr      },
2476         { "(Reserved)",     skd_reserved_isr },
2477         { "(Reserved)",     skd_reserved_isr },
2478         { "(Queue Full 0)", skd_qfull_isr    },
2479         { "(Queue Full 1)", skd_qfull_isr    },
2480         { "(Queue Full 2)", skd_qfull_isr    },
2481         { "(Queue Full 3)", skd_qfull_isr    },
2482 };
2483
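/*
 * MSI-X setup: all SKD_MAX_MSIX_COUNT vectors must be granted (minimum and
 * maximum are both 13), after which a dedicated handler from msix_entries[]
 * is requested for each vector.  On any failure the vectors requested so
 * far are released and the caller falls back to MSI/legacy.
 */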
2484 static int skd_acquire_msix(struct skd_device *skdev)
2485 {
2486         int i, rc;
2487         struct pci_dev *pdev = skdev->pdev;
2488
2489         rc = pci_alloc_irq_vectors(pdev, SKD_MAX_MSIX_COUNT, SKD_MAX_MSIX_COUNT,
2490                         PCI_IRQ_MSIX);
2491         if (rc < 0) {
2492                 dev_err(&skdev->pdev->dev, "failed to enable MSI-X %d\n", rc);
2493                 goto out;
2494         }
2495
2496         skdev->msix_entries = kcalloc(SKD_MAX_MSIX_COUNT,
2497                         sizeof(struct skd_msix_entry), GFP_KERNEL);
2498         if (!skdev->msix_entries) {
2499                 rc = -ENOMEM;
2500                 dev_err(&skdev->pdev->dev, "msix table allocation error\n");
2501                 goto out;
2502         }
2503
2504         /* Request a named IRQ handler for each MSI-X vector */
2505         for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
2506                 struct skd_msix_entry *qentry = &skdev->msix_entries[i];
2507
2508                 snprintf(qentry->isr_name, sizeof(qentry->isr_name),
2509                          "%s%d-msix %s", DRV_NAME, skdev->devno,
2510                          msix_entries[i].name);
2511
2512                 rc = devm_request_irq(&skdev->pdev->dev,
2513                                 pci_irq_vector(skdev->pdev, i),
2514                                 msix_entries[i].handler, 0,
2515                                 qentry->isr_name, skdev);
2516                 if (rc) {
2517                         dev_err(&skdev->pdev->dev,
2518                                 "Unable to register(%d) MSI-X handler %d: %s\n",
2519                                 rc, i, qentry->isr_name);
2520                         goto msix_out;
2521                 }
2522         }
2523
2524         dev_dbg(&skdev->pdev->dev, "%d msix irq(s) enabled\n",
2525                 SKD_MAX_MSIX_COUNT);
2526         return 0;
2527
2528 msix_out:
2529         while (--i >= 0)
2530                 devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), skdev);
2531 out:
2532         kfree(skdev->msix_entries);
2533         skdev->msix_entries = NULL;
2534         return rc;
2535 }
2536
2537 static int skd_acquire_irq(struct skd_device *skdev)
2538 {
2539         struct pci_dev *pdev = skdev->pdev;
2540         unsigned int irq_flag = PCI_IRQ_LEGACY;
2541         int rc;
2542
2543         if (skd_isr_type == SKD_IRQ_MSIX) {
2544                 rc = skd_acquire_msix(skdev);
2545                 if (!rc)
2546                         return 0;
2547
2548                 dev_err(&skdev->pdev->dev,
2549                         "failed to enable MSI-X, re-trying with MSI %d\n", rc);
2550         }
2551
2552         snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d", DRV_NAME,
2553                         skdev->devno);
2554
2555         if (skd_isr_type != SKD_IRQ_LEGACY)
2556                 irq_flag |= PCI_IRQ_MSI;
2557         rc = pci_alloc_irq_vectors(pdev, 1, 1, irq_flag);
2558         if (rc < 0) {
2559                 dev_err(&skdev->pdev->dev,
2560                         "failed to allocate the MSI interrupt %d\n", rc);
2561                 return rc;
2562         }
2563
2564         rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
2565                         pdev->msi_enabled ? 0 : IRQF_SHARED,
2566                         skdev->isr_name, skdev);
2567         if (rc) {
2568                 pci_free_irq_vectors(pdev);
2569                 dev_err(&skdev->pdev->dev, "failed to allocate interrupt %d\n",
2570                         rc);
2571                 return rc;
2572         }
2573
2574         return 0;
2575 }
2576
2577 static void skd_release_irq(struct skd_device *skdev)
2578 {
2579         struct pci_dev *pdev = skdev->pdev;
2580
2581         if (skdev->msix_entries) {
2582                 int i;
2583
2584                 for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
2585                         devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i),
2586                                         skdev);
2587                 }
2588
2589                 kfree(skdev->msix_entries);
2590                 skdev->msix_entries = NULL;
2591         } else {
2592                 devm_free_irq(&pdev->dev, pdev->irq, skdev);
2593         }
2594
2595         pci_free_irq_vectors(pdev);
2596 }
2597
2598 /*
2599  *****************************************************************************
2600  * CONSTRUCT
2601  *****************************************************************************
2602  */
2603
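/*
 * Allocate a buffer from a kmem cache and map it for DMA in one step.
 * Returns NULL if either the allocation or the mapping fails; the result
 * must be released with skd_free_dma() using the same cache and direction.
 */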
2604 static void *skd_alloc_dma(struct skd_device *skdev, struct kmem_cache *s,
2605                            dma_addr_t *dma_handle, gfp_t gfp,
2606                            enum dma_data_direction dir)
2607 {
2608         struct device *dev = &skdev->pdev->dev;
2609         void *buf;
2610
2611         buf = kmem_cache_alloc(s, gfp);
2612         if (!buf)
2613                 return NULL;
2614         *dma_handle = dma_map_single(dev, buf,
2615                                      kmem_cache_size(s), dir);
2616         if (dma_mapping_error(dev, *dma_handle)) {
2617                 kmem_cache_free(s, buf);
2618                 buf = NULL;
2619         }
2620         return buf;
2621 }
2622
2623 static void skd_free_dma(struct skd_device *skdev, struct kmem_cache *s,
2624                          void *vaddr, dma_addr_t dma_handle,
2625                          enum dma_data_direction dir)
2626 {
2627         if (!vaddr)
2628                 return;
2629
2630         dma_unmap_single(&skdev->pdev->dev, dma_handle,
2631                          kmem_cache_size(s), dir);
2632         kmem_cache_free(s, vaddr);
2633 }
2634
2635 static int skd_cons_skcomp(struct skd_device *skdev)
2636 {
2637         int rc = 0;
2638         struct fit_completion_entry_v1 *skcomp;
2639
2640         dev_dbg(&skdev->pdev->dev,
2641                 "comp pci_alloc, total bytes %zd entries %d\n",
2642                 SKD_SKCOMP_SIZE, SKD_N_COMPLETION_ENTRY);
2643
2644         skcomp = dma_alloc_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE,
2645                                     &skdev->cq_dma_address, GFP_KERNEL);
2646
2647         if (skcomp == NULL) {
2648                 rc = -ENOMEM;
2649                 goto err_out;
2650         }
2651
2652         skdev->skcomp_table = skcomp;
2653         skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp +
2654                                                            sizeof(*skcomp) *
2655                                                            SKD_N_COMPLETION_ENTRY);
2656
2657 err_out:
2658         return rc;
2659 }
2660
2661 static int skd_cons_skmsg(struct skd_device *skdev)
2662 {
2663         int rc = 0;
2664         u32 i;
2665
2666         dev_dbg(&skdev->pdev->dev,
2667                 "skmsg_table kcalloc, struct %lu, count %u total %lu\n",
2668                 sizeof(struct skd_fitmsg_context), skdev->num_fitmsg_context,
2669                 sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);
2670
2671         skdev->skmsg_table = kcalloc(skdev->num_fitmsg_context,
2672                                      sizeof(struct skd_fitmsg_context),
2673                                      GFP_KERNEL);
2674         if (skdev->skmsg_table == NULL) {
2675                 rc = -ENOMEM;
2676                 goto err_out;
2677         }
2678
2679         for (i = 0; i < skdev->num_fitmsg_context; i++) {
2680                 struct skd_fitmsg_context *skmsg;
2681
2682                 skmsg = &skdev->skmsg_table[i];
2683
2684                 skmsg->id = i + SKD_ID_FIT_MSG;
2685
2686                 skmsg->msg_buf = dma_alloc_coherent(&skdev->pdev->dev,
2687                                                     SKD_N_FITMSG_BYTES,
2688                                                     &skmsg->mb_dma_address,
2689                                                     GFP_KERNEL);
2690                 if (skmsg->msg_buf == NULL) {
2691                         rc = -ENOMEM;
2692                         goto err_out;
2693                 }
2694
2695                 WARN(((uintptr_t)skmsg->msg_buf | skmsg->mb_dma_address) &
2696                      (FIT_QCMD_ALIGN - 1),
2697                      "not aligned: msg_buf %p mb_dma_address %pad\n",
2698                      skmsg->msg_buf, &skmsg->mb_dma_address);
2699         }
2700
2701 err_out:
2702         return rc;
2703 }
2704
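/*
 * Allocate a DMA-mapped array of n_sg FIT SG descriptors and pre-link it:
 * each descriptor's next_desc_ptr points at the bus address of the next
 * entry, and the final entry is terminated with 0.
 */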
2705 static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
2706                                                   u32 n_sg,
2707                                                   dma_addr_t *ret_dma_addr)
2708 {
2709         struct fit_sg_descriptor *sg_list;
2710
2711         sg_list = skd_alloc_dma(skdev, skdev->sglist_cache, ret_dma_addr,
2712                                 GFP_DMA | __GFP_ZERO, DMA_TO_DEVICE);
2713
2714         if (sg_list != NULL) {
2715                 uint64_t dma_address = *ret_dma_addr;
2716                 u32 i;
2717
2718                 for (i = 0; i < n_sg - 1; i++) {
2719                         uint64_t ndp_off;
2720                         ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);
2721
2722                         sg_list[i].next_desc_ptr = dma_address + ndp_off;
2723                 }
2724                 sg_list[i].next_desc_ptr = 0LL;
2725         }
2726
2727         return sg_list;
2728 }
2729
2730 static void skd_free_sg_list(struct skd_device *skdev,
2731                              struct fit_sg_descriptor *sg_list,
2732                              dma_addr_t dma_addr)
2733 {
2734         if (WARN_ON_ONCE(!sg_list))
2735                 return;
2736
2737         skd_free_dma(skdev, skdev->sglist_cache, sg_list, dma_addr,
2738                      DMA_TO_DEVICE);
2739 }
2740
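/*
 * blk-mq .init_request/.exit_request: each request's PDU carries a
 * scatterlist (allocated as part of the tag set's cmd_size) plus a
 * pre-linked FIT SG descriptor list that lives for the lifetime of the tag.
 */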
2741 static int skd_init_request(struct blk_mq_tag_set *set, struct request *rq,
2742                             unsigned int hctx_idx, unsigned int numa_node)
2743 {
2744         struct skd_device *skdev = set->driver_data;
2745         struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq);
2746
2747         skreq->state = SKD_REQ_STATE_IDLE;
2748         skreq->sg = (void *)(skreq + 1);
2749         sg_init_table(skreq->sg, skd_sgs_per_request);
2750         skreq->sksg_list = skd_cons_sg_list(skdev, skd_sgs_per_request,
2751                                             &skreq->sksg_dma_address);
2752
2753         return skreq->sksg_list ? 0 : -ENOMEM;
2754 }
2755
2756 static void skd_exit_request(struct blk_mq_tag_set *set, struct request *rq,
2757                              unsigned int hctx_idx)
2758 {
2759         struct skd_device *skdev = set->driver_data;
2760         struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq);
2761
2762         skd_free_sg_list(skdev, skreq->sksg_list, skreq->sksg_dma_address);
2763 }
2764
2765 static int skd_cons_sksb(struct skd_device *skdev)
2766 {
2767         int rc = 0;
2768         struct skd_special_context *skspcl;
2769
2770         skspcl = &skdev->internal_skspcl;
2771
2772         skspcl->req.id = 0 + SKD_ID_INTERNAL;
2773         skspcl->req.state = SKD_REQ_STATE_IDLE;
2774
2775         skspcl->data_buf = skd_alloc_dma(skdev, skdev->databuf_cache,
2776                                          &skspcl->db_dma_address,
2777                                          GFP_DMA | __GFP_ZERO,
2778                                          DMA_BIDIRECTIONAL);
2779         if (skspcl->data_buf == NULL) {
2780                 rc = -ENOMEM;
2781                 goto err_out;
2782         }
2783
2784         skspcl->msg_buf = skd_alloc_dma(skdev, skdev->msgbuf_cache,
2785                                         &skspcl->mb_dma_address,
2786                                         GFP_DMA | __GFP_ZERO, DMA_TO_DEVICE);
2787         if (skspcl->msg_buf == NULL) {
2788                 rc = -ENOMEM;
2789                 goto err_out;
2790         }
2791
2792         skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
2793                                                  &skspcl->req.sksg_dma_address);
2794         if (skspcl->req.sksg_list == NULL) {
2795                 rc = -ENOMEM;
2796                 goto err_out;
2797         }
2798
2799         if (!skd_format_internal_skspcl(skdev)) {
2800                 rc = -EINVAL;
2801                 goto err_out;
2802         }
2803
2804 err_out:
2805         return rc;
2806 }
2807
2808 static const struct blk_mq_ops skd_mq_ops = {
2809         .queue_rq       = skd_mq_queue_rq,
2810         .complete       = skd_complete_rq,
2811         .timeout        = skd_timed_out,
2812         .init_request   = skd_init_request,
2813         .exit_request   = skd_exit_request,
2814 };
2815
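/*
 * Allocate the gendisk and set up the blk-mq tag set and request queue
 * (single hardware queue, FIFO tag allocation, per-request payload sized
 * for the scatterlist).  The queue is created stopped; it is started once
 * the device reaches the ONLINE state.
 */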
2816 static int skd_cons_disk(struct skd_device *skdev)
2817 {
2818         int rc = 0;
2819         struct gendisk *disk;
2820         struct request_queue *q;
2821         unsigned long flags;
2822
2823         disk = alloc_disk(SKD_MINORS_PER_DEVICE);
2824         if (!disk) {
2825                 rc = -ENOMEM;
2826                 goto err_out;
2827         }
2828
2829         skdev->disk = disk;
2830         sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno);
2831
2832         disk->major = skdev->major;
2833         disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE;
2834         disk->fops = &skd_blockdev_ops;
2835         disk->private_data = skdev;
2836
2837         memset(&skdev->tag_set, 0, sizeof(skdev->tag_set));
2838         skdev->tag_set.ops = &skd_mq_ops;
2839         skdev->tag_set.nr_hw_queues = 1;
2840         skdev->tag_set.queue_depth = skd_max_queue_depth;
2841         skdev->tag_set.cmd_size = sizeof(struct skd_request_context) +
2842                 skdev->sgs_per_request * sizeof(struct scatterlist);
2843         skdev->tag_set.numa_node = NUMA_NO_NODE;
2844         skdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
2845                 BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_FIFO);
2846         skdev->tag_set.driver_data = skdev;
2847         rc = blk_mq_alloc_tag_set(&skdev->tag_set);
2848         if (rc)
2849                 goto err_out;
2850         q = blk_mq_init_queue(&skdev->tag_set);
2851         if (IS_ERR(q)) {
2852                 blk_mq_free_tag_set(&skdev->tag_set);
2853                 rc = PTR_ERR(q);
2854                 goto err_out;
2855         }
2856         q->queuedata = skdev;
2857
2858         skdev->queue = q;
2859         disk->queue = q;
2860
2861         blk_queue_write_cache(q, true, true);
2862         blk_queue_max_segments(q, skdev->sgs_per_request);
2863         blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
2864
2865         /* set optimal I/O size to 8KB */
2866         blk_queue_io_opt(q, 8192);
2867
2868         blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
2869         blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
2870
2871         blk_queue_rq_timeout(q, 8 * HZ);
2872
2873         spin_lock_irqsave(&skdev->lock, flags);
2874         dev_dbg(&skdev->pdev->dev, "stopping queue\n");
2875         blk_mq_stop_hw_queues(skdev->queue);
2876         spin_unlock_irqrestore(&skdev->lock, flags);
2877
2878 err_out:
2879         return rc;
2880 }
2881
2882 #define SKD_N_DEV_TABLE         16u
2883 static u32 skd_next_devno;
2884
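/*
 * Top-level constructor: allocate the zeroed skd_device, create the DMA
 * slab caches, then build the completion ring, FIT message table, internal
 * command context, and disk, in that order. Any failure unwinds through
 * skd_destruct(), whose helpers all tolerate partially built state.
 */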
2885 static struct skd_device *skd_construct(struct pci_dev *pdev)
2886 {
2887         struct skd_device *skdev;
2888         int blk_major = skd_major;
2889         size_t size;
2890         int rc;
2891
2892         skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);
2893
2894         if (!skdev) {
2895                 dev_err(&pdev->dev, "memory alloc failure\n");
2896                 return NULL;
2897         }
2898
2899         skdev->state = SKD_DRVR_STATE_LOAD;
2900         skdev->pdev = pdev;
2901         skdev->devno = skd_next_devno++;
2902         skdev->major = blk_major;
2903         skdev->dev_max_queue_depth = 0;
2904
2905         skdev->num_req_context = skd_max_queue_depth;
2906         skdev->num_fitmsg_context = skd_max_queue_depth;
2907         skdev->cur_max_queue_depth = 1;
2908         skdev->queue_low_water_mark = 1;
2909         skdev->proto_ver = 99; /* invalid until read back from the drive */
2910         skdev->sgs_per_request = skd_sgs_per_request;
2911         skdev->dbg_level = skd_dbg_level;
2912
2913         spin_lock_init(&skdev->lock);
2914
2915         INIT_WORK(&skdev->start_queue, skd_start_queue);
2916         INIT_WORK(&skdev->completion_worker, skd_completion_worker);
2917
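        /*
         * Three slab caches back the mappings made with skd_alloc_dma():
         * FIT message buffers, per-request SG descriptor lists, and the
         * internal command data buffer. The WARN_ONCEs below sanity-check
         * that each cache's object size covers what was asked for.
         */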
2918         size = max(SKD_N_FITMSG_BYTES, SKD_N_SPECIAL_FITMSG_BYTES);
2919         skdev->msgbuf_cache = kmem_cache_create("skd-msgbuf", size, 0,
2920                                                 SLAB_HWCACHE_ALIGN, NULL);
2921         if (!skdev->msgbuf_cache)
2922                 goto err_out;
2923         WARN_ONCE(kmem_cache_size(skdev->msgbuf_cache) < size,
2924                   "skd-msgbuf: %d < %zu\n",
2925                   kmem_cache_size(skdev->msgbuf_cache), size);
2926         size = skd_sgs_per_request * sizeof(struct fit_sg_descriptor);
2927         skdev->sglist_cache = kmem_cache_create("skd-sglist", size, 0,
2928                                                 SLAB_HWCACHE_ALIGN, NULL);
2929         if (!skdev->sglist_cache)
2930                 goto err_out;
2931         WARN_ONCE(kmem_cache_size(skdev->sglist_cache) < size,
2932                   "skd-sglist: %d < %zu\n",
2933                   kmem_cache_size(skdev->sglist_cache), size);
2934         size = SKD_N_INTERNAL_BYTES;
2935         skdev->databuf_cache = kmem_cache_create("skd-databuf", size, 0,
2936                                                  SLAB_HWCACHE_ALIGN, NULL);
2937         if (!skdev->databuf_cache)
2938                 goto err_out;
2939         WARN_ONCE(kmem_cache_size(skdev->databuf_cache) < size,
2940                   "skd-databuf: %d < %zu\n",
2941                   kmem_cache_size(skdev->databuf_cache), size);
2942
2943         dev_dbg(&skdev->pdev->dev, "skcomp\n");
2944         rc = skd_cons_skcomp(skdev);
2945         if (rc < 0)
2946                 goto err_out;
2947
2948         dev_dbg(&skdev->pdev->dev, "skmsg\n");
2949         rc = skd_cons_skmsg(skdev);
2950         if (rc < 0)
2951                 goto err_out;
2952
2953         dev_dbg(&skdev->pdev->dev, "sksb\n");
2954         rc = skd_cons_sksb(skdev);
2955         if (rc < 0)
2956                 goto err_out;
2957
2958         dev_dbg(&skdev->pdev->dev, "disk\n");
2959         rc = skd_cons_disk(skdev);
2960         if (rc < 0)
2961                 goto err_out;
2962
2963         dev_dbg(&skdev->pdev->dev, "VICTORY\n");
2964         return skdev;
2965
2966 err_out:
2967         dev_dbg(&skdev->pdev->dev, "construct failed\n");
2968         skd_destruct(skdev);
2969         return NULL;
2970 }
2971
2972 /*
2973  *****************************************************************************
2974  * DESTRUCT (FREE)
2975  *****************************************************************************
2976  */
2977
2978 static void skd_free_skcomp(struct skd_device *skdev)
2979 {
2980         if (skdev->skcomp_table)
2981                 dma_free_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE,
2982                                   skdev->skcomp_table, skdev->cq_dma_address);
2983
2984         skdev->skcomp_table = NULL;
2985         skdev->cq_dma_address = 0;
2986 }
2987
2988 static void skd_free_skmsg(struct skd_device *skdev)
2989 {
2990         u32 i;
2991
2992         if (skdev->skmsg_table == NULL)
2993                 return;
2994
2995         for (i = 0; i < skdev->num_fitmsg_context; i++) {
2996                 struct skd_fitmsg_context *skmsg;
2997
2998                 skmsg = &skdev->skmsg_table[i];
2999
3000                 if (skmsg->msg_buf != NULL) {
3001                         dma_free_coherent(&skdev->pdev->dev, SKD_N_FITMSG_BYTES,
3002                                           skmsg->msg_buf,
3003                                           skmsg->mb_dma_address);
3004                 }
3005                 skmsg->msg_buf = NULL;
3006                 skmsg->mb_dma_address = 0;
3007         }
3008
3009         kfree(skdev->skmsg_table);
3010         skdev->skmsg_table = NULL;
3011 }
3012
3013 static void skd_free_sksb(struct skd_device *skdev)
3014 {
3015         struct skd_special_context *skspcl = &skdev->internal_skspcl;
3016
3017         skd_free_dma(skdev, skdev->databuf_cache, skspcl->data_buf,
3018                      skspcl->db_dma_address, DMA_BIDIRECTIONAL);
3019
3020         skspcl->data_buf = NULL;
3021         skspcl->db_dma_address = 0;
3022
3023         skd_free_dma(skdev, skdev->msgbuf_cache, skspcl->msg_buf,
3024                      skspcl->mb_dma_address, DMA_TO_DEVICE);
3025
3026         skspcl->msg_buf = NULL;
3027         skspcl->mb_dma_address = 0;
3028
3029         skd_free_sg_list(skdev, skspcl->req.sksg_list,
3030                          skspcl->req.sksg_dma_address);
3031
3032         skspcl->req.sksg_list = NULL;
3033         skspcl->req.sksg_dma_address = 0;
3034 }
3035
3036 static void skd_free_disk(struct skd_device *skdev)
3037 {
3038         struct gendisk *disk = skdev->disk;
3039
3040         if (disk && (disk->flags & GENHD_FL_UP))
3041                 del_gendisk(disk);
3042
3043         if (skdev->queue) {
3044                 blk_cleanup_queue(skdev->queue);
3045                 skdev->queue = NULL;
3046                 if (disk)
3047                         disk->queue = NULL;
3048         }
3049
3050         if (skdev->tag_set.tags)
3051                 blk_mq_free_tag_set(&skdev->tag_set);
3052
3053         put_disk(disk);
3054         skdev->disk = NULL;
3055 }
3056
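/*
 * Tear down in reverse order of construction. Cancel the queue-start
 * worker first so it cannot run against freed structures.
 */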
3057 static void skd_destruct(struct skd_device *skdev)
3058 {
3059         if (skdev == NULL)
3060                 return;
3061
3062         cancel_work_sync(&skdev->start_queue);
3063
3064         dev_dbg(&skdev->pdev->dev, "disk\n");
3065         skd_free_disk(skdev);
3066
3067         dev_dbg(&skdev->pdev->dev, "sksb\n");
3068         skd_free_sksb(skdev);
3069
3070         dev_dbg(&skdev->pdev->dev, "skmsg\n");
3071         skd_free_skmsg(skdev);
3072
3073         dev_dbg(&skdev->pdev->dev, "skcomp\n");
3074         skd_free_skcomp(skdev);
3075
3076         kmem_cache_destroy(skdev->databuf_cache);
3077         kmem_cache_destroy(skdev->sglist_cache);
3078         kmem_cache_destroy(skdev->msgbuf_cache);
3079
3080         dev_dbg(&skdev->pdev->dev, "skdev\n");
3081         kfree(skdev);
3082 }
3083
3084 /*
3085  *****************************************************************************
3086  * BLOCK DEVICE (BDEV) GLUE
3087  *****************************************************************************
3088  */
3089
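/*
 * HDIO_GETGEO still expects CHS values, even for an SSD, so synthesize a
 * 64-head, 255-sector geometry from the capacity the drive reported.
 */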
3090 static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
3091 {
3092         struct skd_device *skdev;
3093         u64 capacity;
3094
3095         skdev = bdev->bd_disk->private_data;
3096
3097         dev_dbg(&skdev->pdev->dev, "%s: CMD[%s] getgeo device\n",
3098                 bdev->bd_disk->disk_name, current->comm);
3099
3100         if (skdev->read_cap_is_valid) {
3101                 capacity = get_capacity(skdev->disk);
3102                 geo->heads = 64;
3103                 geo->sectors = 255;
3104                 geo->cylinders = capacity / (255 * 64);
3105
3106                 return 0;
3107         }
3108         return -EIO;
3109 }
3110
3111 static int skd_bdev_attach(struct device *parent, struct skd_device *skdev)
3112 {
3113         dev_dbg(&skdev->pdev->dev, "add_disk\n");
3114         device_add_disk(parent, skdev->disk, NULL);
3115         return 0;
3116 }
3117
3118 static const struct block_device_operations skd_blockdev_ops = {
3119         .owner          = THIS_MODULE,
3120         .getgeo         = skd_bdev_getgeo,
3121 };
3122
3123 /*
3124  *****************************************************************************
3125  * PCIe DRIVER GLUE
3126  *****************************************************************************
3127  */
3128
3129 static const struct pci_device_id skd_pci_tbl[] = {
3130         { PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120,
3131           PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
3132         { 0 }                     /* terminate list */
3133 };
3134
3135 MODULE_DEVICE_TABLE(pci, skd_pci_tbl);
3136
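/*
 * Format a human-readable PCIe link description, e.g. "PCIe (5.0GT/s 8X)",
 * from the Link Status register. Only Gen1/Gen2 speeds are decoded;
 * anything newer prints as <unknown>.
 */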
3137 static char *skd_pci_info(struct skd_device *skdev, char *str)
3138 {
3139         int pcie_reg;
3140
3141         strcpy(str, "PCIe (");
3142         pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP);
3143
3144         if (pcie_reg) {
3146                 char lwstr[6];
3147                 uint16_t pcie_lstat, lspeed, lwidth;
3148
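                /* 0x12 is PCI_EXP_LNKSTA, the Link Status register. */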
3149                 pcie_reg += 0x12;
3150                 pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat);
3151                 lspeed = pcie_lstat & (0xF);
3152                 lwidth = (pcie_lstat & 0x3F0) >> 4;
3153
3154                 if (lspeed == 1)
3155                         strcat(str, "2.5GT/s ");
3156                 else if (lspeed == 2)
3157                         strcat(str, "5.0GT/s ");
3158                 else
3159                         strcat(str, "<unknown> ");
3160                 snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth);
3161                 strcat(str, lwstr);
3162         }
3163         return str;
3164 }
3165
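/*
 * Probe: enable the device, set a 64-bit DMA mask (falling back to 32-bit),
 * grab a block major on first use, construct the skd_device, map the BARs,
 * wire up interrupts and the timer, then start the drive and wait for it to
 * come online before exposing the gendisk.
 */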
3166 static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3167 {
3168         int i;
3169         int rc = 0;
3170         char pci_str[32];
3171         struct skd_device *skdev;
3172
3173         dev_dbg(&pdev->dev, "vendor=%04x device=%04x\n", pdev->vendor,
3174                 pdev->device);
3175
3176         rc = pci_enable_device(pdev);
3177         if (rc)
3178                 return rc;
3179         rc = pci_request_regions(pdev, DRV_NAME);
3180         if (rc)
3181                 goto err_out;
3182         rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3183         if (rc)
3184                 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3185         if (rc) {
3186                 dev_err(&pdev->dev, "DMA mask error %d\n", rc);
3187                 goto err_out_regions;
3188         }
3189
3190         if (!skd_major) {
3191                 rc = register_blkdev(0, DRV_NAME);
3192                 if (rc < 0)
3193                         goto err_out_regions;
3194                 BUG_ON(!rc); /* register_blkdev(0, ...) never returns 0 */
3195                 skd_major = rc;
3196         }
3197
3198         skdev = skd_construct(pdev);
3199         if (skdev == NULL) {
3200                 rc = -ENOMEM;
3201                 goto err_out_regions;
3202         }
3203
3204         skd_pci_info(skdev, pci_str);
3205         dev_info(&pdev->dev, "%s 64bit\n", pci_str);
3206
3207         pci_set_master(pdev);
3208         rc = pci_enable_pcie_error_reporting(pdev);
3209         if (rc) {
3210                 dev_err(&pdev->dev,
3211                         "bad enable of PCIe error reporting rc=%d\n", rc);
3212                 skdev->pcie_error_reporting_is_enabled = 0;
3213         } else
3214                 skdev->pcie_error_reporting_is_enabled = 1;
3215
3216         pci_set_drvdata(pdev, skdev);
3217
3218         for (i = 0; i < SKD_MAX_BARS; i++) {
3219                 skdev->mem_phys[i] = pci_resource_start(pdev, i);
3220                 skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
3221                 skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
3222                                             skdev->mem_size[i]);
3223                 if (!skdev->mem_map[i]) {
3224                         dev_err(&pdev->dev,
3225                                 "Unable to map adapter memory!\n");
3226                         rc = -ENODEV;
3227                         goto err_out_iounmap;
3228                 }
3229                 dev_dbg(&pdev->dev, "mem_map=%p, phys=%016llx, size=%d\n",
3230                         skdev->mem_map[i], (uint64_t)skdev->mem_phys[i],
3231                         skdev->mem_size[i]);
3232         }
3233
3234         rc = skd_acquire_irq(skdev);
3235         if (rc) {
3236                 dev_err(&pdev->dev, "interrupt resource error %d\n", rc);
3237                 goto err_out_iounmap;
3238         }
3239
3240         rc = skd_start_timer(skdev);
3241         if (rc)
3242                 goto err_out_timer;
3243
3244         init_waitqueue_head(&skdev->waitq);
3245
3246         skd_start_device(skdev);
3247
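        /*
         * gendisk_on is set once the drive reports itself online; give it
         * SKD_START_WAIT_SECONDS before deciding the hardware is wedged.
         */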
3248         rc = wait_event_interruptible_timeout(skdev->waitq,
3249                                               (skdev->gendisk_on),
3250                                               (SKD_START_WAIT_SECONDS * HZ));
3251         if (skdev->gendisk_on > 0) {
3252                 /* device came on-line after reset */
3253                 skd_bdev_attach(&pdev->dev, skdev);
3254                 rc = 0;
3255         } else {
3256                 /* We timed out; something is wrong with the device,
3257                  * so don't add the disk structure. */
3258                 dev_err(&pdev->dev, "error: waiting for s1120 timed out %d!\n",
3259                         rc);
3260                 /* if no error was reported, turn the timeout into -ENXIO */
3261                 if (!rc)
3262                         rc = -ENXIO;
3263                 goto err_out_timer;
3264         }
3265
3266         return rc;
3267
3268 err_out_timer:
3269         skd_stop_device(skdev);
3270         skd_release_irq(skdev);
3271
3272 err_out_iounmap:
3273         for (i = 0; i < SKD_MAX_BARS; i++)
3274                 if (skdev->mem_map[i])
3275                         iounmap(skdev->mem_map[i]);
3276
3277         if (skdev->pcie_error_reporting_is_enabled)
3278                 pci_disable_pcie_error_reporting(pdev);
3279
3280         skd_destruct(skdev);
3281
3282 err_out_regions:
3283         pci_release_regions(pdev);
3284
3285 err_out:
3286         pci_disable_device(pdev);
3287         pci_set_drvdata(pdev, NULL);
3288         return rc;
3289 }
3290
3291 static void skd_pci_remove(struct pci_dev *pdev)
3292 {
3293         int i;
3294         struct skd_device *skdev;
3295
3296         skdev = pci_get_drvdata(pdev);
3297         if (!skdev) {
3298                 dev_err(&pdev->dev, "no device data for PCI\n");
3299                 return;
3300         }
3301         skd_stop_device(skdev);
3302         skd_release_irq(skdev);
3303
3304         for (i = 0; i < SKD_MAX_BARS; i++)
3305                 if (skdev->mem_map[i])
3306                         iounmap(skdev->mem_map[i]);
3307
3308         if (skdev->pcie_error_reporting_is_enabled)
3309                 pci_disable_pcie_error_reporting(pdev);
3310
3311         skd_destruct(skdev);
3312
3313         pci_release_regions(pdev);
3314         pci_disable_device(pdev);
3315         pci_set_drvdata(pdev, NULL);
3318 }
3319
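/*
 * Legacy PCI power-management hooks: suspend tears the device all the way
 * down (IRQs, BAR mappings, regions) and resume rebuilds it with the same
 * sequence probe uses.
 */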
3320 static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
3321 {
3322         int i;
3323         struct skd_device *skdev;
3324
3325         skdev = pci_get_drvdata(pdev);
3326         if (!skdev) {
3327                 dev_err(&pdev->dev, "no device data for PCI\n");
3328                 return -EIO;
3329         }
3330
3331         skd_stop_device(skdev);
3332
3333         skd_release_irq(skdev);
3334
3335         for (i = 0; i < SKD_MAX_BARS; i++)
3336                 if (skdev->mem_map[i])
3337                         iounmap(skdev->mem_map[i]);
3338
3339         if (skdev->pcie_error_reporting_is_enabled)
3340                 pci_disable_pcie_error_reporting(pdev);
3341
3342         pci_release_regions(pdev);
3343         pci_save_state(pdev);
3344         pci_disable_device(pdev);
3345         pci_set_power_state(pdev, pci_choose_state(pdev, state));
3346         return 0;
3347 }
3348
3349 static int skd_pci_resume(struct pci_dev *pdev)
3350 {
3351         int i;
3352         int rc = 0;
3353         struct skd_device *skdev;
3354
3355         skdev = pci_get_drvdata(pdev);
3356         if (!skdev) {
3357                 dev_err(&pdev->dev, "no device data for PCI\n");
3358                 return -EIO;
3359         }
3360
3361         pci_set_power_state(pdev, PCI_D0);
3362         pci_enable_wake(pdev, PCI_D0, 0);
3363         pci_restore_state(pdev);
3364
3365         rc = pci_enable_device(pdev);
3366         if (rc)
3367                 return rc;
3368         rc = pci_request_regions(pdev, DRV_NAME);
3369         if (rc)
3370                 goto err_out;
3371         rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3372         if (rc)
3373                 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3374         if (rc) {
3375                 dev_err(&pdev->dev, "DMA mask error %d\n", rc);
3376                 goto err_out_regions;
3377         }
3378
3379         pci_set_master(pdev);
3380         rc = pci_enable_pcie_error_reporting(pdev);
3381         if (rc) {
3382                 dev_err(&pdev->dev,
3383                         "bad enable of PCIe error reporting rc=%d\n", rc);
3384                 skdev->pcie_error_reporting_is_enabled = 0;
3385         } else
3386                 skdev->pcie_error_reporting_is_enabled = 1;
3387
3388         for (i = 0; i < SKD_MAX_BARS; i++) {
3390                 skdev->mem_phys[i] = pci_resource_start(pdev, i);
3391                 skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
3392                 skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
3393                                             skdev->mem_size[i]);
3394                 if (!skdev->mem_map[i]) {
3395                         dev_err(&pdev->dev, "Unable to map adapter memory!\n");
3396                         rc = -ENODEV;
3397                         goto err_out_iounmap;
3398                 }
3399                 dev_dbg(&pdev->dev, "mem_map=%p, phys=%016llx, size=%d\n",
3400                         skdev->mem_map[i], (uint64_t)skdev->mem_phys[i],
3401                         skdev->mem_size[i]);
3402         }
3403         rc = skd_acquire_irq(skdev);
3404         if (rc) {
3405                 dev_err(&pdev->dev, "interrupt resource error %d\n", rc);
3406                 goto err_out_iounmap;
3407         }
3408
3409         rc = skd_start_timer(skdev);
3410         if (rc)
3411                 goto err_out_timer;
3412
3413         init_waitqueue_head(&skdev->waitq);
3414
3415         skd_start_device(skdev);
3416
3417         return rc;
3418
3419 err_out_timer:
3420         skd_stop_device(skdev);
3421         skd_release_irq(skdev);
3422
3423 err_out_iounmap:
3424         for (i = 0; i < SKD_MAX_BARS; i++)
3425                 if (skdev->mem_map[i])
3426                         iounmap(skdev->mem_map[i]);
3427
3428         if (skdev->pcie_error_reporting_is_enabled)
3429                 pci_disable_pcie_error_reporting(pdev);
3430
3431 err_out_regions:
3432         pci_release_regions(pdev);
3433
3434 err_out:
3435         pci_disable_device(pdev);
3436         return rc;
3437 }
3438
3439 static void skd_pci_shutdown(struct pci_dev *pdev)
3440 {
3441         struct skd_device *skdev;
3442
3443         dev_err(&pdev->dev, "%s called\n", __func__);
3444
3445         skdev = pci_get_drvdata(pdev);
3446         if (!skdev) {
3447                 dev_err(&pdev->dev, "no device data for PCI\n");
3448                 return;
3449         }
3450
3451         dev_err(&pdev->dev, "calling stop\n");
3452         skd_stop_device(skdev);
3453 }
3454
3455 static struct pci_driver skd_driver = {
3456         .name           = DRV_NAME,
3457         .id_table       = skd_pci_tbl,
3458         .probe          = skd_pci_probe,
3459         .remove         = skd_pci_remove,
3460         .suspend        = skd_pci_suspend,
3461         .resume         = skd_pci_resume,
3462         .shutdown       = skd_pci_shutdown,
3463 };
3464
3465 /*
3466  *****************************************************************************
3467  * LOGGING SUPPORT
3468  *****************************************************************************
3469  */
3470
3471 const char *skd_drive_state_to_str(int state)
3472 {
3473         switch (state) {
3474         case FIT_SR_DRIVE_OFFLINE:
3475                 return "OFFLINE";
3476         case FIT_SR_DRIVE_INIT:
3477                 return "INIT";
3478         case FIT_SR_DRIVE_ONLINE:
3479                 return "ONLINE";
3480         case FIT_SR_DRIVE_BUSY:
3481                 return "BUSY";
3482         case FIT_SR_DRIVE_FAULT:
3483                 return "FAULT";
3484         case FIT_SR_DRIVE_DEGRADED:
3485                 return "DEGRADED";
3486         case FIT_SR_PCIE_LINK_DOWN:
3487                 return "LINK_DOWN";
3488         case FIT_SR_DRIVE_SOFT_RESET:
3489                 return "SOFT_RESET";
3490         case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
3491                 return "NEED_FW";
3492         case FIT_SR_DRIVE_INIT_FAULT:
3493                 return "INIT_FAULT";
3494         case FIT_SR_DRIVE_BUSY_SANITIZE:
3495                 return "BUSY_SANITIZE";
3496         case FIT_SR_DRIVE_BUSY_ERASE:
3497                 return "BUSY_ERASE";
3498         case FIT_SR_DRIVE_FW_BOOTING:
3499                 return "FW_BOOTING";
3500         default:
3501                 return "???";
3502         }
3503 }
3504
3505 const char *skd_skdev_state_to_str(enum skd_drvr_state state)
3506 {
3507         switch (state) {
3508         case SKD_DRVR_STATE_LOAD:
3509                 return "LOAD";
3510         case SKD_DRVR_STATE_IDLE:
3511                 return "IDLE";
3512         case SKD_DRVR_STATE_BUSY:
3513                 return "BUSY";
3514         case SKD_DRVR_STATE_STARTING:
3515                 return "STARTING";
3516         case SKD_DRVR_STATE_ONLINE:
3517                 return "ONLINE";
3518         case SKD_DRVR_STATE_PAUSING:
3519                 return "PAUSING";
3520         case SKD_DRVR_STATE_PAUSED:
3521                 return "PAUSED";
3522         case SKD_DRVR_STATE_RESTARTING:
3523                 return "RESTARTING";
3524         case SKD_DRVR_STATE_RESUMING:
3525                 return "RESUMING";
3526         case SKD_DRVR_STATE_STOPPING:
3527                 return "STOPPING";
3528         case SKD_DRVR_STATE_SYNCING:
3529                 return "SYNCING";
3530         case SKD_DRVR_STATE_FAULT:
3531                 return "FAULT";
3532         case SKD_DRVR_STATE_DISAPPEARED:
3533                 return "DISAPPEARED";
3534         case SKD_DRVR_STATE_BUSY_ERASE:
3535                 return "BUSY_ERASE";
3536         case SKD_DRVR_STATE_BUSY_SANITIZE:
3537                 return "BUSY_SANITIZE";
3538         case SKD_DRVR_STATE_BUSY_IMMINENT:
3539                 return "BUSY_IMMINENT";
3540         case SKD_DRVR_STATE_WAIT_BOOT:
3541                 return "WAIT_BOOT";
3543         default:
3544                 return "???";
3545         }
3546 }
3547
3548 static const char *skd_skreq_state_to_str(enum skd_req_state state)
3549 {
3550         switch (state) {
3551         case SKD_REQ_STATE_IDLE:
3552                 return "IDLE";
3553         case SKD_REQ_STATE_SETUP:
3554                 return "SETUP";
3555         case SKD_REQ_STATE_BUSY:
3556                 return "BUSY";
3557         case SKD_REQ_STATE_COMPLETED:
3558                 return "COMPLETED";
3559         case SKD_REQ_STATE_TIMEOUT:
3560                 return "TIMEOUT";
3561         default:
3562                 return "???";
3563         }
3564 }
3565
3566 static void skd_log_skdev(struct skd_device *skdev, const char *event)
3567 {
3568         dev_dbg(&skdev->pdev->dev, "skdev=%p event='%s'\n", skdev, event);
3569         dev_dbg(&skdev->pdev->dev, "  drive_state=%s(%d) driver_state=%s(%d)\n",
3570                 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
3571                 skd_skdev_state_to_str(skdev->state), skdev->state);
3572         dev_dbg(&skdev->pdev->dev, "  busy=%d limit=%d dev=%d lowat=%d\n",
3573                 skd_in_flight(skdev), skdev->cur_max_queue_depth,
3574                 skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
3575         dev_dbg(&skdev->pdev->dev, "  cycle=%d cycle_ix=%d\n",
3576                 skdev->skcomp_cycle, skdev->skcomp_ix);
3577 }
3578
3579 static void skd_log_skreq(struct skd_device *skdev,
3580                           struct skd_request_context *skreq, const char *event)
3581 {
3582         struct request *req = blk_mq_rq_from_pdu(skreq);
3583         u32 lba = blk_rq_pos(req);
3584         u32 count = blk_rq_sectors(req);
3585
3586         dev_dbg(&skdev->pdev->dev, "skreq=%p event='%s'\n", skreq, event);
3587         dev_dbg(&skdev->pdev->dev, "  state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
3588                 skd_skreq_state_to_str(skreq->state), skreq->state, skreq->id,
3589                 skreq->fitmsg_id);
3590         dev_dbg(&skdev->pdev->dev, "  sg_dir=%d n_sg=%d\n",
3591                 skreq->data_dir, skreq->n_sg);
3592
3593         dev_dbg(&skdev->pdev->dev,
3594                 "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req, lba, lba,
3595                 count, count, (int)rq_data_dir(req));
3596 }
3597
3598 /*
3599  *****************************************************************************
3600  * MODULE GLUE
3601  *****************************************************************************
3602  */
3603
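/*
 * The BUILD_BUG_ONs pin the on-the-wire FIT structure layouts; the rest of
 * init clamps module parameters back to sane defaults before registering
 * the PCI driver.
 */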
3604 static int __init skd_init(void)
3605 {
3606         BUILD_BUG_ON(sizeof(struct fit_completion_entry_v1) != 8);
3607         BUILD_BUG_ON(sizeof(struct fit_comp_error_info) != 32);
3608         BUILD_BUG_ON(sizeof(struct skd_command_header) != 16);
3609         BUILD_BUG_ON(sizeof(struct skd_scsi_request) != 32);
3610         BUILD_BUG_ON(sizeof(struct driver_inquiry_data) != 44);
3611         BUILD_BUG_ON(offsetof(struct skd_msg_buf, fmh) != 0);
3612         BUILD_BUG_ON(offsetof(struct skd_msg_buf, scsi) != 64);
3613         BUILD_BUG_ON(sizeof(struct skd_msg_buf) != SKD_N_FITMSG_BYTES);
3614
3615         switch (skd_isr_type) {
3616         case SKD_IRQ_LEGACY:
3617         case SKD_IRQ_MSI:
3618         case SKD_IRQ_MSIX:
3619                 break;
3620         default:
3621                 pr_err(PFX "skd_isr_type %d invalid, re-set to %d\n",
3622                        skd_isr_type, SKD_IRQ_DEFAULT);
3623                 skd_isr_type = SKD_IRQ_DEFAULT;
3624         }
3625
3626         if (skd_max_queue_depth < 1 ||
3627             skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
3628                 pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n",
3629                        skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
3630                 skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
3631         }
3632
3633         if (skd_max_req_per_msg < 1 ||
3634             skd_max_req_per_msg > SKD_MAX_REQ_PER_MSG) {
3635                 pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n",
3636                        skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
3637                 skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
3638         }
3639
3640         if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
3641                 pr_err(PFX "skd_sgs_per_request %d invalid, re-set to %d\n",
3642                        skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
3643                 skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
3644         }
3645
3646         if (skd_dbg_level < 0 || skd_dbg_level > 2) {
3647                 pr_err(PFX "skd_dbg_level %d invalid, re-set to %d\n",
3648                        skd_dbg_level, 0);
3649                 skd_dbg_level = 0;
3650         }
3651
3652         if (skd_isr_comp_limit < 0) {
3653                 pr_err(PFX "skd_isr_comp_limit %d invalid, set to %d\n",
3654                        skd_isr_comp_limit, 0);
3655                 skd_isr_comp_limit = 0;
3656         }
3657
3658         return pci_register_driver(&skd_driver);
3659 }
3660
3661 static void __exit skd_exit(void)
3662 {
3663         pci_unregister_driver(&skd_driver);
3664
3665         if (skd_major)
3666                 unregister_blkdev(skd_major, DRV_NAME);
3667 }
3668
3669 module_init(skd_init);
3670 module_exit(skd_exit);