drivers/nvme/host/pci.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * NVM Express device driver
4  * Copyright (c) 2011-2014, Intel Corporation.
5  */
6
7 #include <linux/acpi.h>
8 #include <linux/aer.h>
9 #include <linux/async.h>
10 #include <linux/blkdev.h>
11 #include <linux/blk-mq.h>
12 #include <linux/blk-mq-pci.h>
13 #include <linux/dmi.h>
14 #include <linux/init.h>
15 #include <linux/interrupt.h>
16 #include <linux/io.h>
17 #include <linux/mm.h>
18 #include <linux/module.h>
19 #include <linux/mutex.h>
20 #include <linux/once.h>
21 #include <linux/pci.h>
22 #include <linux/suspend.h>
23 #include <linux/t10-pi.h>
24 #include <linux/types.h>
25 #include <linux/io-64-nonatomic-lo-hi.h>
26 #include <linux/sed-opal.h>
27 #include <linux/pci-p2pdma.h>
28
29 #include "trace.h"
30 #include "nvme.h"
31
32 #define SQ_SIZE(q)      ((q)->q_depth << (q)->sqes)
33 #define CQ_SIZE(q)      ((q)->q_depth * sizeof(struct nvme_completion))
34
35 #define SGES_PER_PAGE   (PAGE_SIZE / sizeof(struct nvme_sgl_desc))
36
37 /*
38  * These can be higher, but we need to ensure that any command doesn't
39  * require an sg allocation that needs more than a page of data.
40  */
41 #define NVME_MAX_KB_SZ  4096
42 #define NVME_MAX_SEGS   127
43
44 static int use_threaded_interrupts;
45 module_param(use_threaded_interrupts, int, 0);
46
47 static bool use_cmb_sqes = true;
48 module_param(use_cmb_sqes, bool, 0444);
49 MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");
50
51 static unsigned int max_host_mem_size_mb = 128;
52 module_param(max_host_mem_size_mb, uint, 0444);
53 MODULE_PARM_DESC(max_host_mem_size_mb,
54         "Maximum Host Memory Buffer (HMB) size per controller (in MiB)");
55
56 static unsigned int sgl_threshold = SZ_32K;
57 module_param(sgl_threshold, uint, 0644);
58 MODULE_PARM_DESC(sgl_threshold,
59                 "Use SGLs when average request segment size is larger than or equal to "
60                 "this size. Use 0 to disable SGLs.");
61
62 static int io_queue_depth_set(const char *val, const struct kernel_param *kp);
63 static const struct kernel_param_ops io_queue_depth_ops = {
64         .set = io_queue_depth_set,
65         .get = param_get_uint,
66 };
67
68 static unsigned int io_queue_depth = 1024;
69 module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644);
70 MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should be >= 2");
71
72 static int io_queue_count_set(const char *val, const struct kernel_param *kp)
73 {
74         unsigned int n;
75         int ret;
76
77         ret = kstrtouint(val, 10, &n);
78         if (ret != 0 || n > num_possible_cpus())
79                 return -EINVAL;
80         return param_set_uint(val, kp);
81 }
82
83 static const struct kernel_param_ops io_queue_count_ops = {
84         .set = io_queue_count_set,
85         .get = param_get_uint,
86 };
87
88 static unsigned int write_queues;
89 module_param_cb(write_queues, &io_queue_count_ops, &write_queues, 0644);
90 MODULE_PARM_DESC(write_queues,
91         "Number of queues to use for writes. If not set, reads and writes "
92         "will share a queue set.");
93
94 static unsigned int poll_queues;
95 module_param_cb(poll_queues, &io_queue_count_ops, &poll_queues, 0644);
96 MODULE_PARM_DESC(poll_queues, "Number of queues to use for polled IO.");
97
98 static bool noacpi;
99 module_param(noacpi, bool, 0444);
100 MODULE_PARM_DESC(noacpi, "disable acpi bios quirks");
101
102 struct nvme_dev;
103 struct nvme_queue;
104
105 static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
106 static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode);
107
108 /*
109  * Represents an NVM Express device.  Each nvme_dev is a PCI function.
110  */
111 struct nvme_dev {
112         struct nvme_queue *queues;
113         struct blk_mq_tag_set tagset;
114         struct blk_mq_tag_set admin_tagset;
115         u32 __iomem *dbs;
116         struct device *dev;
117         struct dma_pool *prp_page_pool;
118         struct dma_pool *prp_small_pool;
119         unsigned online_queues;
120         unsigned max_qid;
121         unsigned io_queues[HCTX_MAX_TYPES];
122         unsigned int num_vecs;
123         u16 q_depth;
124         int io_sqes;
125         u32 db_stride;
126         void __iomem *bar;
127         unsigned long bar_mapped_size;
128         struct work_struct remove_work;
129         struct mutex shutdown_lock;
130         bool subsystem;
131         u64 cmb_size;
132         bool cmb_use_sqes;
133         u32 cmbsz;
134         u32 cmbloc;
135         struct nvme_ctrl ctrl;
136         u32 last_ps;
137
138         mempool_t *iod_mempool;
139
140         /* shadow doorbell buffer support: */
141         u32 *dbbuf_dbs;
142         dma_addr_t dbbuf_dbs_dma_addr;
143         u32 *dbbuf_eis;
144         dma_addr_t dbbuf_eis_dma_addr;
145
146         /* host memory buffer support: */
147         u64 host_mem_size;
148         u32 nr_host_mem_descs;
149         dma_addr_t host_mem_descs_dma;
150         struct nvme_host_mem_buf_desc *host_mem_descs;
151         void **host_mem_desc_bufs;
152         unsigned int nr_allocated_queues;
153         unsigned int nr_write_queues;
154         unsigned int nr_poll_queues;
155 };
156
157 static int io_queue_depth_set(const char *val, const struct kernel_param *kp)
158 {
159         int ret;
160         u16 n;
161
162         ret = kstrtou16(val, 10, &n);
163         if (ret != 0 || n < 2)
164                 return -EINVAL;
165
166         return param_set_ushort(val, kp);
167 }
168
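/*
 * Doorbell (and shadow doorbell buffer) layout: each queue has a submission
 * and a completion doorbell, spaced by the controller's doorbell stride, so
 * the SQ entry for a qid sits at qid * 2 * stride and the matching CQ entry
 * at (qid * 2 + 1) * stride.
 */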
169 static inline unsigned int sq_idx(unsigned int qid, u32 stride)
170 {
171         return qid * 2 * stride;
172 }
173
174 static inline unsigned int cq_idx(unsigned int qid, u32 stride)
175 {
176         return (qid * 2 + 1) * stride;
177 }
178
179 static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
180 {
181         return container_of(ctrl, struct nvme_dev, ctrl);
182 }
183
184 /*
185  * An NVM Express queue.  Each device has at least two (one for admin
186  * commands and one for I/O commands).
187  */
188 struct nvme_queue {
189         struct nvme_dev *dev;
190         spinlock_t sq_lock;
191         void *sq_cmds;
192          /* only used for poll queues: */
193         spinlock_t cq_poll_lock ____cacheline_aligned_in_smp;
194         struct nvme_completion *cqes;
195         dma_addr_t sq_dma_addr;
196         dma_addr_t cq_dma_addr;
197         u32 __iomem *q_db;
198         u16 q_depth;
199         u16 cq_vector;
200         u16 sq_tail;
201         u16 cq_head;
202         u16 qid;
203         u8 cq_phase;
204         u8 sqes;
205         unsigned long flags;
206 #define NVMEQ_ENABLED           0
207 #define NVMEQ_SQ_CMB            1
208 #define NVMEQ_DELETE_ERROR      2
209 #define NVMEQ_POLLED            3
210         u32 *dbbuf_sq_db;
211         u32 *dbbuf_cq_db;
212         u32 *dbbuf_sq_ei;
213         u32 *dbbuf_cq_ei;
214         struct completion delete_done;
215 };
216
217 /*
218  * The nvme_iod describes the data in an I/O.
219  *
220  * The sg pointer contains the list of PRP/SGL chunk allocations in addition
221  * to the actual struct scatterlist.
222  */
223 struct nvme_iod {
224         struct nvme_request req;
225         struct nvme_queue *nvmeq;
226         bool use_sgl;
227         int aborted;
228         int npages;             /* In the PRP list. 0 means small pool in use */
229         int nents;              /* Used in scatterlist */
230         dma_addr_t first_dma;
231         unsigned int dma_len;   /* length of single DMA segment mapping */
232         dma_addr_t meta_dma;
233         struct scatterlist *sg;
234 };
235
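/*
 * Size of the shadow doorbell / event index buffers: one SQ and one CQ
 * entry (4 bytes each) per allocated queue, scaled by the doorbell stride.
 */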
236 static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev)
237 {
238         return dev->nr_allocated_queues * 8 * dev->db_stride;
239 }
240
241 static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
242 {
243         unsigned int mem_size = nvme_dbbuf_size(dev);
244
245         if (dev->dbbuf_dbs)
246                 return 0;
247
248         dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size,
249                                             &dev->dbbuf_dbs_dma_addr,
250                                             GFP_KERNEL);
251         if (!dev->dbbuf_dbs)
252                 return -ENOMEM;
253         dev->dbbuf_eis = dma_alloc_coherent(dev->dev, mem_size,
254                                             &dev->dbbuf_eis_dma_addr,
255                                             GFP_KERNEL);
256         if (!dev->dbbuf_eis) {
257                 dma_free_coherent(dev->dev, mem_size,
258                                   dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr);
259                 dev->dbbuf_dbs = NULL;
260                 return -ENOMEM;
261         }
262
263         return 0;
264 }
265
266 static void nvme_dbbuf_dma_free(struct nvme_dev *dev)
267 {
268         unsigned int mem_size = nvme_dbbuf_size(dev);
269
270         if (dev->dbbuf_dbs) {
271                 dma_free_coherent(dev->dev, mem_size,
272                                   dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr);
273                 dev->dbbuf_dbs = NULL;
274         }
275         if (dev->dbbuf_eis) {
276                 dma_free_coherent(dev->dev, mem_size,
277                                   dev->dbbuf_eis, dev->dbbuf_eis_dma_addr);
278                 dev->dbbuf_eis = NULL;
279         }
280 }
281
282 static void nvme_dbbuf_init(struct nvme_dev *dev,
283                             struct nvme_queue *nvmeq, int qid)
284 {
285         if (!dev->dbbuf_dbs || !qid)
286                 return;
287
288         nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)];
289         nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)];
290         nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)];
291         nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)];
292 }
293
294 static void nvme_dbbuf_set(struct nvme_dev *dev)
295 {
296         struct nvme_command c;
297
298         if (!dev->dbbuf_dbs)
299                 return;
300
301         memset(&c, 0, sizeof(c));
302         c.dbbuf.opcode = nvme_admin_dbbuf;
303         c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr);
304         c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr);
305
306         if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) {
307                 dev_warn(dev->ctrl.device, "unable to set dbbuf\n");
308                 /* Free memory and continue on */
309                 nvme_dbbuf_dma_free(dev);
310         }
311 }
312
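/*
 * 16-bit wrap-around comparison: returns true only when the event index
 * written by the controller lies between the previously written doorbell
 * value and the new one, i.e. the controller asked to be notified about an
 * entry that this doorbell update just covered.
 */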
313 static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old)
314 {
315         return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old);
316 }
317
318 /* Update dbbuf and return true if an MMIO is required */
319 static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
320                                               volatile u32 *dbbuf_ei)
321 {
322         if (dbbuf_db) {
323                 u16 old_value;
324
325                 /*
326                  * Ensure that the queue is written before updating
327                  * the doorbell in memory
328                  */
329                 wmb();
330
331                 old_value = *dbbuf_db;
332                 *dbbuf_db = value;
333
334                 /*
335                  * Ensure that the doorbell is updated before reading the event
336                  * index from memory.  The controller needs to provide similar
337                  * ordering to ensure the event index is updated before reading
338                  * the doorbell.
339                  */
340                 mb();
341
342                 if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value))
343                         return false;
344         }
345
346         return true;
347 }
348
349 /*
350  * Will slightly overestimate the number of pages needed.  This is OK
351  * as it only leads to a small amount of wasted memory for the lifetime of
352  * the I/O.
353  */
354 static int nvme_pci_npages_prp(void)
355 {
356         unsigned nprps = DIV_ROUND_UP(NVME_MAX_KB_SZ + NVME_CTRL_PAGE_SIZE,
357                                       NVME_CTRL_PAGE_SIZE);
358         return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
359 }
360
361 /*
362  * Calculates the number of pages needed for the SGL segments. For example a 4k
363  * page can accommodate 256 SGL descriptors.
364  */
365 static int nvme_pci_npages_sgl(void)
366 {
367         return DIV_ROUND_UP(NVME_MAX_SEGS * sizeof(struct nvme_sgl_desc),
368                         PAGE_SIZE);
369 }
370
371 static size_t nvme_pci_iod_alloc_size(void)
372 {
373         size_t npages = max(nvme_pci_npages_prp(), nvme_pci_npages_sgl());
374
375         return sizeof(__le64 *) * npages +
376                 sizeof(struct scatterlist) * NVME_MAX_SEGS;
377 }
378
379 static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
380                                 unsigned int hctx_idx)
381 {
382         struct nvme_dev *dev = data;
383         struct nvme_queue *nvmeq = &dev->queues[0];
384
385         WARN_ON(hctx_idx != 0);
386         WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);
387
388         hctx->driver_data = nvmeq;
389         return 0;
390 }
391
392 static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
393                           unsigned int hctx_idx)
394 {
395         struct nvme_dev *dev = data;
396         struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];
397
398         WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags);
399         hctx->driver_data = nvmeq;
400         return 0;
401 }
402
403 static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
404                 unsigned int hctx_idx, unsigned int numa_node)
405 {
406         struct nvme_dev *dev = set->driver_data;
407         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
408         int queue_idx = (set == &dev->tagset) ? hctx_idx + 1 : 0;
409         struct nvme_queue *nvmeq = &dev->queues[queue_idx];
410
411         BUG_ON(!nvmeq);
412         iod->nvmeq = nvmeq;
413
414         nvme_req(req)->ctrl = &dev->ctrl;
415         return 0;
416 }
417
418 static int queue_irq_offset(struct nvme_dev *dev)
419 {
420         /* if we have more than 1 vec, admin queue offsets us by 1 */
421         if (dev->num_vecs > 1)
422                 return 1;
423
424         return 0;
425 }
426
427 static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
428 {
429         struct nvme_dev *dev = set->driver_data;
430         int i, qoff, offset;
431
432         offset = queue_irq_offset(dev);
433         for (i = 0, qoff = 0; i < set->nr_maps; i++) {
434                 struct blk_mq_queue_map *map = &set->map[i];
435
436                 map->nr_queues = dev->io_queues[i];
437                 if (!map->nr_queues) {
438                         BUG_ON(i == HCTX_TYPE_DEFAULT);
439                         continue;
440                 }
441
442                 /*
443                  * The poll queue(s) don't have an IRQ (and hence IRQ
444                  * affinity), so use the regular blk-mq cpu mapping
445                  */
446                 map->queue_offset = qoff;
447                 if (i != HCTX_TYPE_POLL && offset)
448                         blk_mq_pci_map_queues(map, to_pci_dev(dev->dev), offset);
449                 else
450                         blk_mq_map_queues(map);
451                 qoff += map->nr_queues;
452                 offset += map->nr_queues;
453         }
454
455         return 0;
456 }
457
458 static inline void nvme_write_sq_db(struct nvme_queue *nvmeq)
459 {
460         if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail,
461                         nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei))
462                 writel(nvmeq->sq_tail, nvmeq->q_db);
463 }
464
465 /**
466  * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
467  * @nvmeq: The queue to use
468  * @cmd: The command to send
469  * @write_sq: whether to write to the SQ doorbell
470  */
471 static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
472                             bool write_sq)
473 {
474         spin_lock(&nvmeq->sq_lock);
475         memcpy(nvmeq->sq_cmds + (nvmeq->sq_tail << nvmeq->sqes),
476                cmd, sizeof(*cmd));
477         if (++nvmeq->sq_tail == nvmeq->q_depth)
478                 nvmeq->sq_tail = 0;
479         if (write_sq)
480                 nvme_write_sq_db(nvmeq);
481         spin_unlock(&nvmeq->sq_lock);
482 }
483
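/*
 * blk-mq ->commit_rqs() callback: ring the SQ doorbell once for a batch of
 * commands whose doorbell write was deferred by nvme_queue_rq().
 */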
484 static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
485 {
486         struct nvme_queue *nvmeq = hctx->driver_data;
487
488         spin_lock(&nvmeq->sq_lock);
489         nvme_write_sq_db(nvmeq);
490         spin_unlock(&nvmeq->sq_lock);
491 }
492
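/*
 * The per-request PRP list / SGL segment page pointers are stored directly
 * behind the scatterlist entries in the iod allocation (see
 * nvme_pci_iod_alloc_size()).
 */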
493 static void **nvme_pci_iod_list(struct request *req)
494 {
495         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
496         return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
497 }
498
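/*
 * SGLs are only used for I/O queues (the admin queue always uses PRPs), and
 * only when the controller advertises SGL support and the request's average
 * segment size is at least sgl_threshold.
 */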
499 static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
500 {
501         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
502         int nseg = blk_rq_nr_phys_segments(req);
503         unsigned int avg_seg_size;
504
505         avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);
506
507         if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1))))
508                 return false;
509         if (!iod->nvmeq->qid)
510                 return false;
511         if (!sgl_threshold || avg_seg_size < sgl_threshold)
512                 return false;
513         return true;
514 }
515
516 static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
517 {
518         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
519         const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
520         dma_addr_t dma_addr = iod->first_dma, next_dma_addr;
521         int i;
522
523         if (iod->dma_len) {
524                 dma_unmap_page(dev->dev, dma_addr, iod->dma_len,
525                                rq_dma_dir(req));
526                 return;
527         }
528
529         WARN_ON_ONCE(!iod->nents);
530
531         if (is_pci_p2pdma_page(sg_page(iod->sg)))
532                 pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
533                                     rq_dma_dir(req));
534         else
535                 dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
536
537
538         if (iod->npages == 0)
539                 dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
540                         dma_addr);
541
542         for (i = 0; i < iod->npages; i++) {
543                 void *addr = nvme_pci_iod_list(req)[i];
544
545                 if (iod->use_sgl) {
546                         struct nvme_sgl_desc *sg_list = addr;
547
548                         next_dma_addr =
549                             le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr);
550                 } else {
551                         __le64 *prp_list = addr;
552
553                         next_dma_addr = le64_to_cpu(prp_list[last_prp]);
554                 }
555
556                 dma_pool_free(dev->prp_page_pool, addr, dma_addr);
557                 dma_addr = next_dma_addr;
558         }
559
560         mempool_free(iod->sg, dev->iod_mempool);
561 }
562
563 static void nvme_print_sgl(struct scatterlist *sgl, int nents)
564 {
565         int i;
566         struct scatterlist *sg;
567
568         for_each_sg(sgl, sg, nents, i) {
569                 dma_addr_t phys = sg_phys(sg);
570                 pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d "
571                         "dma_address:%pad dma_length:%d\n",
572                         i, &phys, sg->offset, sg->length, &sg_dma_address(sg),
573                         sg_dma_len(sg));
574         }
575 }
576
577 static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
578                 struct request *req, struct nvme_rw_command *cmnd)
579 {
580         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
581         struct dma_pool *pool;
582         int length = blk_rq_payload_bytes(req);
583         struct scatterlist *sg = iod->sg;
584         int dma_len = sg_dma_len(sg);
585         u64 dma_addr = sg_dma_address(sg);
586         int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1);
587         __le64 *prp_list;
588         void **list = nvme_pci_iod_list(req);
589         dma_addr_t prp_dma;
590         int nprps, i;
591
592         length -= (NVME_CTRL_PAGE_SIZE - offset);
593         if (length <= 0) {
594                 iod->first_dma = 0;
595                 goto done;
596         }
597
598         dma_len -= (NVME_CTRL_PAGE_SIZE - offset);
599         if (dma_len) {
600                 dma_addr += (NVME_CTRL_PAGE_SIZE - offset);
601         } else {
602                 sg = sg_next(sg);
603                 dma_addr = sg_dma_address(sg);
604                 dma_len = sg_dma_len(sg);
605         }
606
607         if (length <= NVME_CTRL_PAGE_SIZE) {
608                 iod->first_dma = dma_addr;
609                 goto done;
610         }
611
612         nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE);
613         if (nprps <= (256 / 8)) {
614                 pool = dev->prp_small_pool;
615                 iod->npages = 0;
616         } else {
617                 pool = dev->prp_page_pool;
618                 iod->npages = 1;
619         }
620
621         prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
622         if (!prp_list) {
623                 iod->first_dma = dma_addr;
624                 iod->npages = -1;
625                 return BLK_STS_RESOURCE;
626         }
627         list[0] = prp_list;
628         iod->first_dma = prp_dma;
629         i = 0;
630         for (;;) {
631                 if (i == NVME_CTRL_PAGE_SIZE >> 3) {
632                         __le64 *old_prp_list = prp_list;
633                         prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
634                         if (!prp_list)
635                                 return BLK_STS_RESOURCE;
636                         list[iod->npages++] = prp_list;
637                         prp_list[0] = old_prp_list[i - 1];
638                         old_prp_list[i - 1] = cpu_to_le64(prp_dma);
639                         i = 1;
640                 }
641                 prp_list[i++] = cpu_to_le64(dma_addr);
642                 dma_len -= NVME_CTRL_PAGE_SIZE;
643                 dma_addr += NVME_CTRL_PAGE_SIZE;
644                 length -= NVME_CTRL_PAGE_SIZE;
645                 if (length <= 0)
646                         break;
647                 if (dma_len > 0)
648                         continue;
649                 if (unlikely(dma_len < 0))
650                         goto bad_sgl;
651                 sg = sg_next(sg);
652                 dma_addr = sg_dma_address(sg);
653                 dma_len = sg_dma_len(sg);
654         }
655
656 done:
657         cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
658         cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
659
660         return BLK_STS_OK;
661
662  bad_sgl:
663         WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents),
664                         "Invalid SGL for payload:%d nents:%d\n",
665                         blk_rq_payload_bytes(req), iod->nents);
666         return BLK_STS_IOERR;
667 }
668
669 static void nvme_pci_sgl_set_data(struct nvme_sgl_desc *sge,
670                 struct scatterlist *sg)
671 {
672         sge->addr = cpu_to_le64(sg_dma_address(sg));
673         sge->length = cpu_to_le32(sg_dma_len(sg));
674         sge->type = NVME_SGL_FMT_DATA_DESC << 4;
675 }
676
677 static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
678                 dma_addr_t dma_addr, int entries)
679 {
680         sge->addr = cpu_to_le64(dma_addr);
681         if (entries < SGES_PER_PAGE) {
682                 sge->length = cpu_to_le32(entries * sizeof(*sge));
683                 sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4;
684         } else {
685                 sge->length = cpu_to_le32(PAGE_SIZE);
686                 sge->type = NVME_SGL_FMT_SEG_DESC << 4;
687         }
688 }
689
690 static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
691                 struct request *req, struct nvme_rw_command *cmd, int entries)
692 {
693         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
694         struct dma_pool *pool;
695         struct nvme_sgl_desc *sg_list;
696         struct scatterlist *sg = iod->sg;
697         dma_addr_t sgl_dma;
698         int i = 0;
699
700         /* setting the transfer type as SGL */
701         cmd->flags = NVME_CMD_SGL_METABUF;
702
703         if (entries == 1) {
704                 nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg);
705                 return BLK_STS_OK;
706         }
707
708         if (entries <= (256 / sizeof(struct nvme_sgl_desc))) {
709                 pool = dev->prp_small_pool;
710                 iod->npages = 0;
711         } else {
712                 pool = dev->prp_page_pool;
713                 iod->npages = 1;
714         }
715
716         sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
717         if (!sg_list) {
718                 iod->npages = -1;
719                 return BLK_STS_RESOURCE;
720         }
721
722         nvme_pci_iod_list(req)[0] = sg_list;
723         iod->first_dma = sgl_dma;
724
725         nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries);
726
727         do {
728                 if (i == SGES_PER_PAGE) {
729                         struct nvme_sgl_desc *old_sg_desc = sg_list;
730                         struct nvme_sgl_desc *link = &old_sg_desc[i - 1];
731
732                         sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
733                         if (!sg_list)
734                                 return BLK_STS_RESOURCE;
735
736                         i = 0;
737                         nvme_pci_iod_list(req)[iod->npages++] = sg_list;
738                         sg_list[i++] = *link;
739                         nvme_pci_sgl_set_seg(link, sgl_dma, entries);
740                 }
741
742                 nvme_pci_sgl_set_data(&sg_list[i++], sg);
743                 sg = sg_next(sg);
744         } while (--entries > 0);
745
746         return BLK_STS_OK;
747 }
748
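/*
 * Fast path for requests with a single physical segment that fits in at
 * most two controller pages: the data can be described by prp1/prp2
 * directly, without a scatterlist or a PRP list allocation.
 */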
749 static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
750                 struct request *req, struct nvme_rw_command *cmnd,
751                 struct bio_vec *bv)
752 {
753         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
754         unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1);
755         unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset;
756
757         iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
758         if (dma_mapping_error(dev->dev, iod->first_dma))
759                 return BLK_STS_RESOURCE;
760         iod->dma_len = bv->bv_len;
761
762         cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
763         if (bv->bv_len > first_prp_len)
764                 cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len);
765         return BLK_STS_OK;
766 }
767
768 static blk_status_t nvme_setup_sgl_simple(struct nvme_dev *dev,
769                 struct request *req, struct nvme_rw_command *cmnd,
770                 struct bio_vec *bv)
771 {
772         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
773
774         iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
775         if (dma_mapping_error(dev->dev, iod->first_dma))
776                 return BLK_STS_RESOURCE;
777         iod->dma_len = bv->bv_len;
778
779         cmnd->flags = NVME_CMD_SGL_METABUF;
780         cmnd->dptr.sgl.addr = cpu_to_le64(iod->first_dma);
781         cmnd->dptr.sgl.length = cpu_to_le32(iod->dma_len);
782         cmnd->dptr.sgl.type = NVME_SGL_FMT_DATA_DESC << 4;
783         return BLK_STS_OK;
784 }
785
786 static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
787                 struct nvme_command *cmnd)
788 {
789         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
790         blk_status_t ret = BLK_STS_RESOURCE;
791         int nr_mapped;
792
793         if (blk_rq_nr_phys_segments(req) == 1) {
794                 struct bio_vec bv = req_bvec(req);
795
796                 if (!is_pci_p2pdma_page(bv.bv_page)) {
797                         if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
798                                 return nvme_setup_prp_simple(dev, req,
799                                                              &cmnd->rw, &bv);
800
801                         if (iod->nvmeq->qid &&
802                             dev->ctrl.sgls & ((1 << 0) | (1 << 1)))
803                                 return nvme_setup_sgl_simple(dev, req,
804                                                              &cmnd->rw, &bv);
805                 }
806         }
807
808         iod->dma_len = 0;
809         iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
810         if (!iod->sg)
811                 return BLK_STS_RESOURCE;
812         sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
813         iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
814         if (!iod->nents)
815                 goto out;
816
817         if (is_pci_p2pdma_page(sg_page(iod->sg)))
818                 nr_mapped = pci_p2pdma_map_sg_attrs(dev->dev, iod->sg,
819                                 iod->nents, rq_dma_dir(req), DMA_ATTR_NO_WARN);
820         else
821                 nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents,
822                                              rq_dma_dir(req), DMA_ATTR_NO_WARN);
823         if (!nr_mapped)
824                 goto out;
825
826         iod->use_sgl = nvme_pci_use_sgls(dev, req);
827         if (iod->use_sgl)
828                 ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
829         else
830                 ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
831 out:
832         if (ret != BLK_STS_OK)
833                 nvme_unmap_data(dev, req);
834         return ret;
835 }
836
837 static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req,
838                 struct nvme_command *cmnd)
839 {
840         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
841
842         iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req),
843                         rq_dma_dir(req), 0);
844         if (dma_mapping_error(dev->dev, iod->meta_dma))
845                 return BLK_STS_IOERR;
846         cmnd->rw.metadata = cpu_to_le64(iod->meta_dma);
847         return BLK_STS_OK;
848 }
849
850 /*
851  * NOTE: ns is NULL when called on the admin queue.
852  */
853 static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
854                          const struct blk_mq_queue_data *bd)
855 {
856         struct nvme_ns *ns = hctx->queue->queuedata;
857         struct nvme_queue *nvmeq = hctx->driver_data;
858         struct nvme_dev *dev = nvmeq->dev;
859         struct request *req = bd->rq;
860         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
861         struct nvme_command cmnd;
862         blk_status_t ret;
863
864         iod->aborted = 0;
865         iod->npages = -1;
866         iod->nents = 0;
867
868         /*
869          * We should not need to do this, but we're still using this to
870          * ensure we can drain requests on a dying queue.
871          */
872         if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
873                 return BLK_STS_IOERR;
874
875         ret = nvme_setup_cmd(ns, req, &cmnd);
876         if (ret)
877                 return ret;
878
879         if (blk_rq_nr_phys_segments(req)) {
880                 ret = nvme_map_data(dev, req, &cmnd);
881                 if (ret)
882                         goto out_free_cmd;
883         }
884
885         if (blk_integrity_rq(req)) {
886                 ret = nvme_map_metadata(dev, req, &cmnd);
887                 if (ret)
888                         goto out_unmap_data;
889         }
890
891         blk_mq_start_request(req);
892         nvme_submit_cmd(nvmeq, &cmnd, bd->last);
893         return BLK_STS_OK;
894 out_unmap_data:
895         nvme_unmap_data(dev, req);
896 out_free_cmd:
897         nvme_cleanup_cmd(req);
898         return ret;
899 }
900
901 static void nvme_pci_complete_rq(struct request *req)
902 {
903         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
904         struct nvme_dev *dev = iod->nvmeq->dev;
905
906         if (blk_integrity_rq(req))
907                 dma_unmap_page(dev->dev, iod->meta_dma,
908                                rq_integrity_vec(req)->bv_len, rq_data_dir(req));
909         if (blk_rq_nr_phys_segments(req))
910                 nvme_unmap_data(dev, req);
911         nvme_complete_rq(req);
912 }
913
914 /* We read the CQE phase first to check if the rest of the entry is valid */
915 static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq)
916 {
917         struct nvme_completion *hcqe = &nvmeq->cqes[nvmeq->cq_head];
918
919         return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == nvmeq->cq_phase;
920 }
921
922 static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq)
923 {
924         u16 head = nvmeq->cq_head;
925
926         if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db,
927                                               nvmeq->dbbuf_cq_ei))
928                 writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
929 }
930
931 static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq)
932 {
933         if (!nvmeq->qid)
934                 return nvmeq->dev->admin_tagset.tags[0];
935         return nvmeq->dev->tagset.tags[nvmeq->qid - 1];
936 }
937
938 static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
939 {
940         struct nvme_completion *cqe = &nvmeq->cqes[idx];
941         struct request *req;
942
943         if (unlikely(cqe->command_id >= nvmeq->q_depth)) {
944                 dev_warn(nvmeq->dev->ctrl.device,
945                         "invalid id %d completed on queue %d\n",
946                         cqe->command_id, le16_to_cpu(cqe->sq_id));
947                 return;
948         }
949
950         /*
951          * AEN requests are special as they don't time out and can
952          * survive any kind of queue freeze and often don't respond to
953          * aborts.  We don't even bother to allocate a struct request
954          * for them but rather special case them here.
955          */
956         if (unlikely(nvme_is_aen_req(nvmeq->qid, cqe->command_id))) {
957                 nvme_complete_async_event(&nvmeq->dev->ctrl,
958                                 cqe->status, &cqe->result);
959                 return;
960         }
961
962         req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id);
963         trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
964         if (!nvme_end_request(req, cqe->status, cqe->result))
965                 nvme_pci_complete_rq(req);
966 }
967
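/* Advance the CQ head, flipping the expected phase bit when the head wraps. */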
968 static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
969 {
970         u16 tmp = nvmeq->cq_head + 1;
971
972         if (tmp == nvmeq->q_depth) {
973                 nvmeq->cq_head = 0;
974                 nvmeq->cq_phase ^= 1;
975         } else {
976                 nvmeq->cq_head = tmp;
977         }
978 }
979
980 static inline int nvme_process_cq(struct nvme_queue *nvmeq)
981 {
982         int found = 0;
983
984         while (nvme_cqe_pending(nvmeq)) {
985                 found++;
986                 /*
987                  * load-load control dependency between phase and the rest of
988                  * the cqe requires a full read memory barrier
989                  */
990                 dma_rmb();
991                 nvme_handle_cqe(nvmeq, nvmeq->cq_head);
992                 nvme_update_cq_head(nvmeq);
993         }
994
995         if (found)
996                 nvme_ring_cq_doorbell(nvmeq);
997         return found;
998 }
999
1000 static irqreturn_t nvme_irq(int irq, void *data)
1001 {
1002         struct nvme_queue *nvmeq = data;
1003         irqreturn_t ret = IRQ_NONE;
1004
1005         /*
1006          * The rmb/wmb pair ensures we see all updates from a previous run of
1007          * the irq handler, even if that was on another CPU.
1008          */
1009         rmb();
1010         if (nvme_process_cq(nvmeq))
1011                 ret = IRQ_HANDLED;
1012         wmb();
1013
1014         return ret;
1015 }
1016
1017 static irqreturn_t nvme_irq_check(int irq, void *data)
1018 {
1019         struct nvme_queue *nvmeq = data;
1020
1021         if (nvme_cqe_pending(nvmeq))
1022                 return IRQ_WAKE_THREAD;
1023         return IRQ_NONE;
1024 }
1025
1026 /*
1027  * Poll for completions for any interrupt driven queue
1028  * Can be called from any context.
1029  */
1030 static void nvme_poll_irqdisable(struct nvme_queue *nvmeq)
1031 {
1032         struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);
1033
1034         WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags));
1035
1036         disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
1037         nvme_process_cq(nvmeq);
1038         enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
1039 }
1040
1041 static int nvme_poll(struct blk_mq_hw_ctx *hctx)
1042 {
1043         struct nvme_queue *nvmeq = hctx->driver_data;
1044         bool found;
1045
1046         if (!nvme_cqe_pending(nvmeq))
1047                 return 0;
1048
1049         spin_lock(&nvmeq->cq_poll_lock);
1050         found = nvme_process_cq(nvmeq);
1051         spin_unlock(&nvmeq->cq_poll_lock);
1052
1053         return found;
1054 }
1055
1056 static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
1057 {
1058         struct nvme_dev *dev = to_nvme_dev(ctrl);
1059         struct nvme_queue *nvmeq = &dev->queues[0];
1060         struct nvme_command c;
1061
1062         memset(&c, 0, sizeof(c));
1063         c.common.opcode = nvme_admin_async_event;
1064         c.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
1065         nvme_submit_cmd(nvmeq, &c, true);
1066 }
1067
1068 static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
1069 {
1070         struct nvme_command c;
1071
1072         memset(&c, 0, sizeof(c));
1073         c.delete_queue.opcode = opcode;
1074         c.delete_queue.qid = cpu_to_le16(id);
1075
1076         return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
1077 }
1078
1079 static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
1080                 struct nvme_queue *nvmeq, s16 vector)
1081 {
1082         struct nvme_command c;
1083         int flags = NVME_QUEUE_PHYS_CONTIG;
1084
1085         if (!test_bit(NVMEQ_POLLED, &nvmeq->flags))
1086                 flags |= NVME_CQ_IRQ_ENABLED;
1087
1088         /*
1089          * Note: we (ab)use the fact that the prp fields survive if no data
1090          * is attached to the request.
1091          */
1092         memset(&c, 0, sizeof(c));
1093         c.create_cq.opcode = nvme_admin_create_cq;
1094         c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
1095         c.create_cq.cqid = cpu_to_le16(qid);
1096         c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
1097         c.create_cq.cq_flags = cpu_to_le16(flags);
1098         c.create_cq.irq_vector = cpu_to_le16(vector);
1099
1100         return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
1101 }
1102
1103 static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
1104                                                 struct nvme_queue *nvmeq)
1105 {
1106         struct nvme_ctrl *ctrl = &dev->ctrl;
1107         struct nvme_command c;
1108         int flags = NVME_QUEUE_PHYS_CONTIG;
1109
1110         /*
1111          * Some drives have a bug that auto-enables WRRU arbitration if MEDIUM
1112          * priority isn't set. Since the URGENT priority class encodes as zero,
1113          * that bug makes every queue URGENT.
1114          */
1115         if (ctrl->quirks & NVME_QUIRK_MEDIUM_PRIO_SQ)
1116                 flags |= NVME_SQ_PRIO_MEDIUM;
1117
1118         /*
1119          * Note: we (ab)use the fact that the prp fields survive if no data
1120          * is attached to the request.
1121          */
1122         memset(&c, 0, sizeof(c));
1123         c.create_sq.opcode = nvme_admin_create_sq;
1124         c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
1125         c.create_sq.sqid = cpu_to_le16(qid);
1126         c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
1127         c.create_sq.sq_flags = cpu_to_le16(flags);
1128         c.create_sq.cqid = cpu_to_le16(qid);
1129
1130         return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
1131 }
1132
1133 static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
1134 {
1135         return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
1136 }
1137
1138 static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
1139 {
1140         return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
1141 }
1142
1143 static void abort_endio(struct request *req, blk_status_t error)
1144 {
1145         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
1146         struct nvme_queue *nvmeq = iod->nvmeq;
1147
1148         dev_warn(nvmeq->dev->ctrl.device,
1149                  "Abort status: 0x%x", nvme_req(req)->status);
1150         atomic_inc(&nvmeq->dev->ctrl.abort_limit);
1151         blk_mq_free_request(req);
1152 }
1153
1154 static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
1155 {
1156         /* If true, indicates loss of adapter communication, possibly by an
1157          * NVMe Subsystem reset.
1158          */
1159         bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);
1160
1161         /* If there is a reset/reinit ongoing, we shouldn't reset again. */
1162         switch (dev->ctrl.state) {
1163         case NVME_CTRL_RESETTING:
1164         case NVME_CTRL_CONNECTING:
1165                 return false;
1166         default:
1167                 break;
1168         }
1169
1170         /* We shouldn't reset unless the controller is on fatal error state
1171          * _or_ if we lost the communication with it.
1172          */
1173         if (!(csts & NVME_CSTS_CFS) && !nssro)
1174                 return false;
1175
1176         return true;
1177 }
1178
1179 static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
1180 {
1181         /* Read a config register to help see what died. */
1182         u16 pci_status;
1183         int result;
1184
1185         result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS,
1186                                       &pci_status);
1187         if (result == PCIBIOS_SUCCESSFUL)
1188                 dev_warn(dev->ctrl.device,
1189                          "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n",
1190                          csts, pci_status);
1191         else
1192                 dev_warn(dev->ctrl.device,
1193                          "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n",
1194                          csts, result);
1195 }
1196
1197 static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
1198 {
1199         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
1200         struct nvme_queue *nvmeq = iod->nvmeq;
1201         struct nvme_dev *dev = nvmeq->dev;
1202         struct request *abort_req;
1203         struct nvme_command cmd;
1204         u32 csts = readl(dev->bar + NVME_REG_CSTS);
1205
1206         /* If PCI error recovery process is happening, we cannot reset or
1207          * the recovery mechanism will surely fail.
1208          */
1209         mb();
1210         if (pci_channel_offline(to_pci_dev(dev->dev)))
1211                 return BLK_EH_RESET_TIMER;
1212
1213         /*
1214          * Reset immediately if the controller has failed
1215          */
1216         if (nvme_should_reset(dev, csts)) {
1217                 nvme_warn_reset(dev, csts);
1218                 nvme_dev_disable(dev, false);
1219                 nvme_reset_ctrl(&dev->ctrl);
1220                 return BLK_EH_DONE;
1221         }
1222
1223         /*
1224          * Did we miss an interrupt?
1225          */
1226         if (test_bit(NVMEQ_POLLED, &nvmeq->flags))
1227                 nvme_poll(req->mq_hctx);
1228         else
1229                 nvme_poll_irqdisable(nvmeq);
1230
1231         if (blk_mq_request_completed(req)) {
1232                 dev_warn(dev->ctrl.device,
1233                          "I/O %d QID %d timeout, completion polled\n",
1234                          req->tag, nvmeq->qid);
1235                 return BLK_EH_DONE;
1236         }
1237
1238         /*
1239          * Shutdown immediately if controller times out while starting. The
1240          * reset work will see the pci device disabled when it gets the forced
1241          * cancellation error. All outstanding requests are completed on
1242          * shutdown, so we return BLK_EH_DONE.
1243          */
1244         switch (dev->ctrl.state) {
1245         case NVME_CTRL_CONNECTING:
1246                 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
1247                 /* fall through */
1248         case NVME_CTRL_DELETING:
1249                 dev_warn_ratelimited(dev->ctrl.device,
1250                          "I/O %d QID %d timeout, disable controller\n",
1251                          req->tag, nvmeq->qid);
1252                 nvme_dev_disable(dev, true);
1253                 nvme_req(req)->flags |= NVME_REQ_CANCELLED;
1254                 return BLK_EH_DONE;
1255         case NVME_CTRL_RESETTING:
1256                 return BLK_EH_RESET_TIMER;
1257         default:
1258                 break;
1259         }
1260
1261         /*
1262          * Shutdown the controller immediately and schedule a reset if the
1263          * command was already aborted once before and still hasn't been
1264          * returned to the driver, or if this is the admin queue.
1265          */
1266         if (!nvmeq->qid || iod->aborted) {
1267                 dev_warn(dev->ctrl.device,
1268                          "I/O %d QID %d timeout, reset controller\n",
1269                          req->tag, nvmeq->qid);
1270                 nvme_dev_disable(dev, false);
1271                 nvme_reset_ctrl(&dev->ctrl);
1272
1273                 nvme_req(req)->flags |= NVME_REQ_CANCELLED;
1274                 return BLK_EH_DONE;
1275         }
1276
1277         if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) {
1278                 atomic_inc(&dev->ctrl.abort_limit);
1279                 return BLK_EH_RESET_TIMER;
1280         }
1281         iod->aborted = 1;
1282
1283         memset(&cmd, 0, sizeof(cmd));
1284         cmd.abort.opcode = nvme_admin_abort_cmd;
1285         cmd.abort.cid = req->tag;
1286         cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
1287
1288         dev_warn(nvmeq->dev->ctrl.device,
1289                 "I/O %d QID %d timeout, aborting\n",
1290                  req->tag, nvmeq->qid);
1291
1292         abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd,
1293                         BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
1294         if (IS_ERR(abort_req)) {
1295                 atomic_inc(&dev->ctrl.abort_limit);
1296                 return BLK_EH_RESET_TIMER;
1297         }
1298
1299         abort_req->timeout = ADMIN_TIMEOUT;
1300         abort_req->end_io_data = NULL;
1301         blk_execute_rq_nowait(abort_req->q, NULL, abort_req, 0, abort_endio);
1302
1303         /*
1304          * The aborted req will be completed on receiving the abort req.
1305          * We enable the timer again. If hit twice, it'll cause a device reset,
1306          * as the device then is in a faulty state.
1307          */
1308         return BLK_EH_RESET_TIMER;
1309 }
1310
1311 static void nvme_free_queue(struct nvme_queue *nvmeq)
1312 {
1313         dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq),
1314                                 (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
1315         if (!nvmeq->sq_cmds)
1316                 return;
1317
1318         if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) {
1319                 pci_free_p2pmem(to_pci_dev(nvmeq->dev->dev),
1320                                 nvmeq->sq_cmds, SQ_SIZE(nvmeq));
1321         } else {
1322                 dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq),
1323                                 nvmeq->sq_cmds, nvmeq->sq_dma_addr);
1324         }
1325 }
1326
1327 static void nvme_free_queues(struct nvme_dev *dev, int lowest)
1328 {
1329         int i;
1330
1331         for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) {
1332                 dev->ctrl.queue_count--;
1333                 nvme_free_queue(&dev->queues[i]);
1334         }
1335 }
1336
1337 /**
1338  * nvme_suspend_queue - put queue into suspended state
1339  * @nvmeq: queue to suspend
1340  */
1341 static int nvme_suspend_queue(struct nvme_queue *nvmeq)
1342 {
1343         if (!test_and_clear_bit(NVMEQ_ENABLED, &nvmeq->flags))
1344                 return 1;
1345
1346         /* ensure that nvme_queue_rq() sees NVMEQ_ENABLED cleared */
1347         mb();
1348
1349         nvmeq->dev->online_queues--;
1350         if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
1351                 blk_mq_quiesce_queue(nvmeq->dev->ctrl.admin_q);
1352         if (!test_and_clear_bit(NVMEQ_POLLED, &nvmeq->flags))
1353                 pci_free_irq(to_pci_dev(nvmeq->dev->dev), nvmeq->cq_vector, nvmeq);
1354         return 0;
1355 }
1356
1357 static void nvme_suspend_io_queues(struct nvme_dev *dev)
1358 {
1359         int i;
1360
1361         for (i = dev->ctrl.queue_count - 1; i > 0; i--)
1362                 nvme_suspend_queue(&dev->queues[i]);
1363 }
1364
1365 static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
1366 {
1367         struct nvme_queue *nvmeq = &dev->queues[0];
1368
1369         if (shutdown)
1370                 nvme_shutdown_ctrl(&dev->ctrl);
1371         else
1372                 nvme_disable_ctrl(&dev->ctrl);
1373
1374         nvme_poll_irqdisable(nvmeq);
1375 }
1376
1377 /*
1378  * Called only on a device that has been disabled and after all other threads
1379  * that can check this device's completion queues have synced, except
1380  * nvme_poll(). This is the last chance for the driver to see a natural
1381  * completion before nvme_cancel_request() terminates all incomplete requests.
1382  */
1383 static void nvme_reap_pending_cqes(struct nvme_dev *dev)
1384 {
1385         int i;
1386
1387         for (i = dev->ctrl.queue_count - 1; i > 0; i--) {
1388                 spin_lock(&dev->queues[i].cq_poll_lock);
1389                 nvme_process_cq(&dev->queues[i]);
1390                 spin_unlock(&dev->queues[i].cq_poll_lock);
1391         }
1392 }
1393
1394 static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
1395                                 int entry_size)
1396 {
1397         int q_depth = dev->q_depth;
1398         unsigned q_size_aligned = roundup(q_depth * entry_size,
1399                                           NVME_CTRL_PAGE_SIZE);
1400
1401         if (q_size_aligned * nr_io_queues > dev->cmb_size) {
1402                 u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues);
1403
1404                 mem_per_q = round_down(mem_per_q, NVME_CTRL_PAGE_SIZE);
1405                 q_depth = div_u64(mem_per_q, entry_size);
1406
1407                 /*
1408                  * Ensure the reduced q_depth is above some threshold where it
1409                  * would be better to map queues in system memory with the
1410                  * original depth
1411                  */
1412                 if (q_depth < 64)
1413                         return -ENOMEM;
1414         }
1415
1416         return q_depth;
1417 }
1418
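/*
 * Try to place the submission queue in the controller memory buffer (CMB)
 * via the PCI P2P DMA allocator; fall back to ordinary coherent DMA memory
 * if the CMB is not usable for SQes or the P2P mapping fails.
 */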
1419 static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
1420                                 int qid)
1421 {
1422         struct pci_dev *pdev = to_pci_dev(dev->dev);
1423
1424         if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
1425                 nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(nvmeq));
1426                 if (nvmeq->sq_cmds) {
1427                         nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev,
1428                                                         nvmeq->sq_cmds);
1429                         if (nvmeq->sq_dma_addr) {
1430                                 set_bit(NVMEQ_SQ_CMB, &nvmeq->flags);
1431                                 return 0;
1432                         }
1433
1434                         pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(nvmeq));
1435                 }
1436         }
1437
1438         nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(nvmeq),
1439                                 &nvmeq->sq_dma_addr, GFP_KERNEL);
1440         if (!nvmeq->sq_cmds)
1441                 return -ENOMEM;
1442         return 0;
1443 }
1444
1445 static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
1446 {
1447         struct nvme_queue *nvmeq = &dev->queues[qid];
1448
1449         if (dev->ctrl.queue_count > qid)
1450                 return 0;
1451
1452         nvmeq->sqes = qid ? dev->io_sqes : NVME_ADM_SQES;
1453         nvmeq->q_depth = depth;
1454         nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq),
1455                                          &nvmeq->cq_dma_addr, GFP_KERNEL);
1456         if (!nvmeq->cqes)
1457                 goto free_nvmeq;
1458
1459         if (nvme_alloc_sq_cmds(dev, nvmeq, qid))
1460                 goto free_cqdma;
1461
1462         nvmeq->dev = dev;
1463         spin_lock_init(&nvmeq->sq_lock);
1464         spin_lock_init(&nvmeq->cq_poll_lock);
1465         nvmeq->cq_head = 0;
1466         nvmeq->cq_phase = 1;
1467         nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
1468         nvmeq->qid = qid;
1469         dev->ctrl.queue_count++;
1470
1471         return 0;
1472
1473  free_cqdma:
1474         dma_free_coherent(dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes,
1475                           nvmeq->cq_dma_addr);
1476  free_nvmeq:
1477         return -ENOMEM;
1478 }
1479
1480 static int queue_request_irq(struct nvme_queue *nvmeq)
1481 {
1482         struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);
1483         int nr = nvmeq->dev->ctrl.instance;
1484
1485         if (use_threaded_interrupts) {
1486                 return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check,
1487                                 nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
1488         } else {
1489                 return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq,
1490                                 NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
1491         }
1492 }
1493
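/*
 * Reset the host-side queue state (tail, head, phase, doorbell pointers,
 * shadow doorbells) when a queue is brought online.
 */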
1494 static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
1495 {
1496         struct nvme_dev *dev = nvmeq->dev;
1497
1498         nvmeq->sq_tail = 0;
1499         nvmeq->cq_head = 0;
1500         nvmeq->cq_phase = 1;
1501         nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
1502         memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq));
1503         nvme_dbbuf_init(dev, nvmeq, qid);
1504         dev->online_queues++;
1505         wmb(); /* ensure the first interrupt sees the initialization */
1506 }
1507
1508 static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
1509 {
1510         struct nvme_dev *dev = nvmeq->dev;
1511         int result;
1512         u16 vector = 0;
1513
1514         clear_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags);
1515
1516         /*
1517          * A queue's vector matches the queue identifier unless the controller
1518          * has only one vector available.
1519          */
1520         if (!polled)
1521                 vector = dev->num_vecs == 1 ? 0 : qid;
1522         else
1523                 set_bit(NVMEQ_POLLED, &nvmeq->flags);
1524
1525         result = adapter_alloc_cq(dev, qid, nvmeq, vector);
1526         if (result)
1527                 return result;
1528
1529         result = adapter_alloc_sq(dev, qid, nvmeq);
1530         if (result < 0)
1531                 return result;
1532         if (result)
1533                 goto release_cq;
1534
1535         nvmeq->cq_vector = vector;
1536         nvme_init_queue(nvmeq, qid);
1537
1538         if (!polled) {
1539                 result = queue_request_irq(nvmeq);
1540                 if (result < 0)
1541                         goto release_sq;
1542         }
1543
1544         set_bit(NVMEQ_ENABLED, &nvmeq->flags);
1545         return result;
1546
1547 release_sq:
1548         dev->online_queues--;
1549         adapter_delete_sq(dev, qid);
1550 release_cq:
1551         adapter_delete_cq(dev, qid);
1552         return result;
1553 }
1554
1555 static const struct blk_mq_ops nvme_mq_admin_ops = {
1556         .queue_rq       = nvme_queue_rq,
1557         .complete       = nvme_pci_complete_rq,
1558         .init_hctx      = nvme_admin_init_hctx,
1559         .init_request   = nvme_init_request,
1560         .timeout        = nvme_timeout,
1561 };
1562
1563 static const struct blk_mq_ops nvme_mq_ops = {
1564         .queue_rq       = nvme_queue_rq,
1565         .complete       = nvme_pci_complete_rq,
1566         .commit_rqs     = nvme_commit_rqs,
1567         .init_hctx      = nvme_init_hctx,
1568         .init_request   = nvme_init_request,
1569         .map_queues     = nvme_pci_map_queues,
1570         .timeout        = nvme_timeout,
1571         .poll           = nvme_poll,
1572 };
1573
1574 static void nvme_dev_remove_admin(struct nvme_dev *dev)
1575 {
1576         if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) {
1577                 /*
1578                  * If the controller was reset during removal, it's possible
1579                  * user requests may be waiting on a stopped queue. Start the
1580                  * queue to flush these to completion.
1581                  */
1582                 blk_mq_unquiesce_queue(dev->ctrl.admin_q);
1583                 blk_cleanup_queue(dev->ctrl.admin_q);
1584                 blk_mq_free_tag_set(&dev->admin_tagset);
1585         }
1586 }
1587
1588 static int nvme_alloc_admin_tags(struct nvme_dev *dev)
1589 {
1590         if (!dev->ctrl.admin_q) {
1591                 dev->admin_tagset.ops = &nvme_mq_admin_ops;
1592                 dev->admin_tagset.nr_hw_queues = 1;
1593
1594                 dev->admin_tagset.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
1595                 dev->admin_tagset.timeout = ADMIN_TIMEOUT;
1596                 dev->admin_tagset.numa_node = dev->ctrl.numa_node;
1597                 dev->admin_tagset.cmd_size = sizeof(struct nvme_iod);
1598                 dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
1599                 dev->admin_tagset.driver_data = dev;
1600
1601                 if (blk_mq_alloc_tag_set(&dev->admin_tagset))
1602                         return -ENOMEM;
1603                 dev->ctrl.admin_tagset = &dev->admin_tagset;
1604
1605                 dev->ctrl.admin_q = blk_mq_init_queue(&dev->admin_tagset);
1606                 if (IS_ERR(dev->ctrl.admin_q)) {
1607                         blk_mq_free_tag_set(&dev->admin_tagset);
1608                         return -ENOMEM;
1609                 }
1610                 if (!blk_get_queue(dev->ctrl.admin_q)) {
1611                         nvme_dev_remove_admin(dev);
1612                         dev->ctrl.admin_q = NULL;
1613                         return -ENODEV;
1614                 }
1615         } else
1616                 blk_mq_unquiesce_queue(dev->ctrl.admin_q);
1617
1618         return 0;
1619 }
1620
1621 static unsigned long db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
1622 {
1623         return NVME_REG_DBS + ((nr_io_queues + 1) * 8 * dev->db_stride);
1624 }
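
/*
 * For illustration: each queue needs an SQ and a CQ doorbell of
 * 4 * db_stride bytes each, so with db_stride == 1 the admin queue plus
 * eight I/O queues need NVME_REG_DBS + 9 * 8 = 0x1000 + 72 bytes of BAR 0.
 */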
1625
1626 static int nvme_remap_bar(struct nvme_dev *dev, unsigned long size)
1627 {
1628         struct pci_dev *pdev = to_pci_dev(dev->dev);
1629
1630         if (size <= dev->bar_mapped_size)
1631                 return 0;
1632         if (size > pci_resource_len(pdev, 0))
1633                 return -ENOMEM;
1634         if (dev->bar)
1635                 iounmap(dev->bar);
1636         dev->bar = ioremap(pci_resource_start(pdev, 0), size);
1637         if (!dev->bar) {
1638                 dev->bar_mapped_size = 0;
1639                 return -ENOMEM;
1640         }
1641         dev->bar_mapped_size = size;
1642         dev->dbs = dev->bar + NVME_REG_DBS;
1643
1644         return 0;
1645 }
1646
1647 static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
1648 {
1649         int result;
1650         u32 aqa;
1651         struct nvme_queue *nvmeq;
1652
1653         result = nvme_remap_bar(dev, db_bar_size(dev, 0));
1654         if (result < 0)
1655                 return result;
1656
1657         dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ?
1658                                 NVME_CAP_NSSRC(dev->ctrl.cap) : 0;
1659
1660         if (dev->subsystem &&
1661             (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO))
1662                 writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS);
1663
1664         result = nvme_disable_ctrl(&dev->ctrl);
1665         if (result < 0)
1666                 return result;
1667
1668         result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
1669         if (result)
1670                 return result;
1671
1672         dev->ctrl.numa_node = dev_to_node(dev->dev);
1673
1674         nvmeq = &dev->queues[0];
1675         aqa = nvmeq->q_depth - 1;
1676         aqa |= aqa << 16;
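        /* e.g. with NVME_AQ_DEPTH == 32 this yields AQA == 0x001f001f (0's based) */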
1677
1678         writel(aqa, dev->bar + NVME_REG_AQA);
1679         lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ);
1680         lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ);
1681
1682         result = nvme_enable_ctrl(&dev->ctrl);
1683         if (result)
1684                 return result;
1685
1686         nvmeq->cq_vector = 0;
1687         nvme_init_queue(nvmeq, 0);
1688         result = queue_request_irq(nvmeq);
1689         if (result) {
1690                 dev->online_queues--;
1691                 return result;
1692         }
1693
1694         set_bit(NVMEQ_ENABLED, &nvmeq->flags);
1695         return result;
1696 }
1697
1698 static int nvme_create_io_queues(struct nvme_dev *dev)
1699 {
1700         unsigned i, max, rw_queues;
1701         int ret = 0;
1702
1703         for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) {
1704                 if (nvme_alloc_queue(dev, i, dev->q_depth)) {
1705                         ret = -ENOMEM;
1706                         break;
1707                 }
1708         }
1709
1710         max = min(dev->max_qid, dev->ctrl.queue_count - 1);
1711         if (max != 1 && dev->io_queues[HCTX_TYPE_POLL]) {
1712                 rw_queues = dev->io_queues[HCTX_TYPE_DEFAULT] +
1713                                 dev->io_queues[HCTX_TYPE_READ];
1714         } else {
1715                 rw_queues = max;
1716         }
1717
1718         for (i = dev->online_queues; i <= max; i++) {
1719                 bool polled = i > rw_queues;
1720
1721                 ret = nvme_create_queue(&dev->queues[i], i, polled);
1722                 if (ret)
1723                         break;
1724         }
1725
1726         /*
1727          * Ignore failing Create SQ/CQ commands: we can continue with fewer
1728          * than the desired number of queues, and even a controller without
1729          * I/O queues can still be used to issue admin commands.  This can be
1730          * useful, for example, to upgrade buggy firmware.
1731          */
1732         return ret >= 0 ? 0 : ret;
1733 }
1734
1735 static ssize_t nvme_cmb_show(struct device *dev,
1736                              struct device_attribute *attr,
1737                              char *buf)
1738 {
1739         struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
1740
1741         return scnprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz  : x%08x\n",
1742                        ndev->cmbloc, ndev->cmbsz);
1743 }
1744 static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL);
1745
1746 static u64 nvme_cmb_size_unit(struct nvme_dev *dev)
1747 {
1748         u8 szu = (dev->cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK;
1749
1750         return 1ULL << (12 + 4 * szu);
1751 }
1752
1753 static u32 nvme_cmb_size(struct nvme_dev *dev)
1754 {
1755         return (dev->cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK;
1756 }
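
/*
 * Example of how the two helpers above combine: with CMBSZ.SZU == 0 the size
 * unit is 1 << (12 + 4 * 0) = 4 KiB, so a controller reporting CMBSZ.SZ ==
 * 1024 advertises a 4 MiB CMB, placed CMBLOC.OFST size units into the BAR
 * selected by CMBLOC.BIR.
 */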
1757
1758 static void nvme_map_cmb(struct nvme_dev *dev)
1759 {
1760         u64 size, offset;
1761         resource_size_t bar_size;
1762         struct pci_dev *pdev = to_pci_dev(dev->dev);
1763         int bar;
1764
1765         if (dev->cmb_size)
1766                 return;
1767
1768         dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
1769         if (!dev->cmbsz)
1770                 return;
1771         dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC);
1772
1773         size = nvme_cmb_size_unit(dev) * nvme_cmb_size(dev);
1774         offset = nvme_cmb_size_unit(dev) * NVME_CMB_OFST(dev->cmbloc);
1775         bar = NVME_CMB_BIR(dev->cmbloc);
1776         bar_size = pci_resource_len(pdev, bar);
1777
1778         if (offset > bar_size)
1779                 return;
1780
1781         /*
1782          * Controllers may support a CMB size larger than their BAR,
1783          * for example, due to being behind a bridge. Reduce the CMB to
1784          * the reported size of the BAR.
1785          */
1786         if (size > bar_size - offset)
1787                 size = bar_size - offset;
1788
1789         if (pci_p2pdma_add_resource(pdev, bar, size, offset)) {
1790                 dev_warn(dev->ctrl.device,
1791                          "failed to register the CMB\n");
1792                 return;
1793         }
1794
1795         dev->cmb_size = size;
1796         dev->cmb_use_sqes = use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS);
1797
1798         if ((dev->cmbsz & (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) ==
1799                         (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS))
1800                 pci_p2pmem_publish(pdev, true);
1801
1802         if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
1803                                     &dev_attr_cmb.attr, NULL))
1804                 dev_warn(dev->ctrl.device,
1805                          "failed to add sysfs attribute for CMB\n");
1806 }
1807
1808 static inline void nvme_release_cmb(struct nvme_dev *dev)
1809 {
1810         if (dev->cmb_size) {
1811                 sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
1812                                              &dev_attr_cmb.attr, NULL);
1813                 dev->cmb_size = 0;
1814         }
1815 }
1816
1817 static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
1818 {
1819         u32 host_mem_size = dev->host_mem_size >> NVME_CTRL_PAGE_SHIFT;
1820         u64 dma_addr = dev->host_mem_descs_dma;
1821         struct nvme_command c;
1822         int ret;
1823
1824         memset(&c, 0, sizeof(c));
1825         c.features.opcode       = nvme_admin_set_features;
1826         c.features.fid          = cpu_to_le32(NVME_FEAT_HOST_MEM_BUF);
1827         c.features.dword11      = cpu_to_le32(bits);
1828         c.features.dword12      = cpu_to_le32(host_mem_size);
1829         c.features.dword13      = cpu_to_le32(lower_32_bits(dma_addr));
1830         c.features.dword14      = cpu_to_le32(upper_32_bits(dma_addr));
1831         c.features.dword15      = cpu_to_le32(dev->nr_host_mem_descs);
1832
1833         ret = nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
1834         if (ret) {
1835                 dev_warn(dev->ctrl.device,
1836                          "failed to set host mem (err %d, flags %#x).\n",
1837                          ret, bits);
1838         }
1839         return ret;
1840 }
1841
1842 static void nvme_free_host_mem(struct nvme_dev *dev)
1843 {
1844         int i;
1845
1846         for (i = 0; i < dev->nr_host_mem_descs; i++) {
1847                 struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i];
1848                 size_t size = le32_to_cpu(desc->size) * NVME_CTRL_PAGE_SIZE;
1849
1850                 dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i],
1851                                le64_to_cpu(desc->addr),
1852                                DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
1853         }
1854
1855         kfree(dev->host_mem_desc_bufs);
1856         dev->host_mem_desc_bufs = NULL;
1857         dma_free_coherent(dev->dev,
1858                         dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs),
1859                         dev->host_mem_descs, dev->host_mem_descs_dma);
1860         dev->host_mem_descs = NULL;
1861         dev->nr_host_mem_descs = 0;
1862 }
1863
1864 static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
1865                 u32 chunk_size)
1866 {
1867         struct nvme_host_mem_buf_desc *descs;
1868         u32 max_entries, len;
1869         dma_addr_t descs_dma;
1870         int i = 0;
1871         void **bufs;
1872         u64 size, tmp;
1873
1874         tmp = (preferred + chunk_size - 1);
1875         do_div(tmp, chunk_size);
1876         max_entries = tmp;
1877
1878         if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries)
1879                 max_entries = dev->ctrl.hmmaxd;
1880
1881         descs = dma_alloc_coherent(dev->dev, max_entries * sizeof(*descs),
1882                                    &descs_dma, GFP_KERNEL);
1883         if (!descs)
1884                 goto out;
1885
1886         bufs = kcalloc(max_entries, sizeof(*bufs), GFP_KERNEL);
1887         if (!bufs)
1888                 goto out_free_descs;
1889
1890         for (size = 0; size < preferred && i < max_entries; size += len) {
1891                 dma_addr_t dma_addr;
1892
1893                 len = min_t(u64, chunk_size, preferred - size);
1894                 bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL,
1895                                 DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
1896                 if (!bufs[i])
1897                         break;
1898
1899                 descs[i].addr = cpu_to_le64(dma_addr);
1900                 descs[i].size = cpu_to_le32(len / NVME_CTRL_PAGE_SIZE);
1901                 i++;
1902         }
1903
1904         if (!size)
1905                 goto out_free_bufs;
1906
1907         dev->nr_host_mem_descs = i;
1908         dev->host_mem_size = size;
1909         dev->host_mem_descs = descs;
1910         dev->host_mem_descs_dma = descs_dma;
1911         dev->host_mem_desc_bufs = bufs;
1912         return 0;
1913
1914 out_free_bufs:
1915         while (--i >= 0) {
1916                 size_t size = le32_to_cpu(descs[i].size) * NVME_CTRL_PAGE_SIZE;
1917
1918                 dma_free_attrs(dev->dev, size, bufs[i],
1919                                le64_to_cpu(descs[i].addr),
1920                                DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
1921         }
1922
1923         kfree(bufs);
1924 out_free_descs:
1925         dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs,
1926                         descs_dma);
1927 out:
1928         dev->host_mem_descs = NULL;
1929         return -ENOMEM;
1930 }
1931
1932 static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
1933 {
1934         u64 min_chunk = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES);
1935         u64 hmminds = max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2);
1936         u64 chunk_size;
1937
1938         /* start big and work our way down */
1939         for (chunk_size = min_chunk; chunk_size >= hmminds; chunk_size /= 2) {
1940                 if (!__nvme_alloc_host_mem(dev, preferred, chunk_size)) {
1941                         if (!min || dev->host_mem_size >= min)
1942                                 return 0;
1943                         nvme_free_host_mem(dev);
1944                 }
1945         }
1946
1947         return -ENOMEM;
1948 }
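
/*
 * Rough walk-through of the strategy above, assuming 4 KiB pages where
 * PAGE_SIZE * MAX_ORDER_NR_PAGES is typically 4 MiB: for a 256 MiB preferred
 * HMB the first attempt asks for 64 descriptors of 4 MiB each (unless HMMAXD
 * caps the count); if the resulting buffer is smaller than the controller's
 * minimum, it is freed and the loop retries with 2 MiB, 1 MiB, ... chunks
 * until the chunk size drops below hmminds.
 */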
1949
1950 static int nvme_setup_host_mem(struct nvme_dev *dev)
1951 {
1952         u64 max = (u64)max_host_mem_size_mb * SZ_1M;
1953         u64 preferred = (u64)dev->ctrl.hmpre * 4096;
1954         u64 min = (u64)dev->ctrl.hmmin * 4096;
1955         u32 enable_bits = NVME_HOST_MEM_ENABLE;
1956         int ret;
1957
1958         preferred = min(preferred, max);
1959         if (min > max) {
1960                 dev_warn(dev->ctrl.device,
1961                         "min host memory (%lld MiB) above limit (%d MiB).\n",
1962                         min >> ilog2(SZ_1M), max_host_mem_size_mb);
1963                 nvme_free_host_mem(dev);
1964                 return 0;
1965         }
1966
1967         /*
1968          * If we already have a buffer allocated, check if we can reuse it.
1969          */
1970         if (dev->host_mem_descs) {
1971                 if (dev->host_mem_size >= min)
1972                         enable_bits |= NVME_HOST_MEM_RETURN;
1973                 else
1974                         nvme_free_host_mem(dev);
1975         }
1976
1977         if (!dev->host_mem_descs) {
1978                 if (nvme_alloc_host_mem(dev, min, preferred)) {
1979                         dev_warn(dev->ctrl.device,
1980                                 "failed to allocate host memory buffer.\n");
1981                         return 0; /* controller must work without HMB */
1982                 }
1983
1984                 dev_info(dev->ctrl.device,
1985                         "allocated %lld MiB host memory buffer.\n",
1986                         dev->host_mem_size >> ilog2(SZ_1M));
1987         }
1988
1989         ret = nvme_set_host_mem(dev, enable_bits);
1990         if (ret)
1991                 nvme_free_host_mem(dev);
1992         return ret;
1993 }
1994
1995 /*
1996  * nrirqs is the number of interrupts available for write and read
1997  * queues. The core already reserved an interrupt for the admin queue.
1998  */
1999 static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs)
2000 {
2001         struct nvme_dev *dev = affd->priv;
2002         unsigned int nr_read_queues, nr_write_queues = dev->nr_write_queues;
2003
2004         /*
2005          * If there is no interrupt available for queues, ensure that
2006          * the default queue is set to 1. The affinity set size is
2007          * also set to one, but the irq core ignores it for this case.
2008          *
2009          * If only one interrupt is available or 'write_queues' == 0, combine
2010          * write and read queues.
2011          *
2012          * If 'write_queues' > 0, ensure it leaves room for at least one read
2013          * queue.
2014          */
2015         if (!nrirqs) {
2016                 nrirqs = 1;
2017                 nr_read_queues = 0;
2018         } else if (nrirqs == 1 || !nr_write_queues) {
2019                 nr_read_queues = 0;
2020         } else if (nr_write_queues >= nrirqs) {
2021                 nr_read_queues = 1;
2022         } else {
2023                 nr_read_queues = nrirqs - nr_write_queues;
2024         }
2025
2026         dev->io_queues[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues;
2027         affd->set_size[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues;
2028         dev->io_queues[HCTX_TYPE_READ] = nr_read_queues;
2029         affd->set_size[HCTX_TYPE_READ] = nr_read_queues;
2030         affd->nr_sets = nr_read_queues ? 2 : 1;
2031 }
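
/*
 * Two examples of the split above: nrirqs == 8 with write_queues == 0 puts
 * all eight vectors in HCTX_TYPE_DEFAULT and uses a single affinity set;
 * nrirqs == 8 with write_queues == 2 yields two sets of 2 (default/write)
 * and 6 (read) vectors.
 */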
2032
2033 static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
2034 {
2035         struct pci_dev *pdev = to_pci_dev(dev->dev);
2036         struct irq_affinity affd = {
2037                 .pre_vectors    = 1,
2038                 .calc_sets      = nvme_calc_irq_sets,
2039                 .priv           = dev,
2040         };
2041         unsigned int irq_queues, this_p_queues;
2042
2043         /*
2044          * Poll queues don't need interrupts, but we need at least one IO
2045          * queue left over for non-polled IO.
2046          */
2047         this_p_queues = dev->nr_poll_queues;
2048         if (this_p_queues >= nr_io_queues) {
2049                 this_p_queues = nr_io_queues - 1;
2050                 irq_queues = 1;
2051         } else {
2052                 irq_queues = nr_io_queues - this_p_queues + 1;
2053         }
2054         dev->io_queues[HCTX_TYPE_POLL] = this_p_queues;
2055
2056         /* Initialize for the single interrupt case */
2057         dev->io_queues[HCTX_TYPE_DEFAULT] = 1;
2058         dev->io_queues[HCTX_TYPE_READ] = 0;
2059
2060         /*
2061          * Some Apple controllers require all queues to use the
2062          * first vector.
2063          */
2064         if (dev->ctrl.quirks & NVME_QUIRK_SINGLE_VECTOR)
2065                 irq_queues = 1;
2066
2067         return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues,
2068                               PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
2069 }
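
/*
 * Example of the accounting above: 8 I/O queues with poll_queues == 2 gives
 * this_p_queues == 2 and irq_queues == 7, i.e. the pre-reserved admin vector
 * plus up to six interrupt-driven I/O queues; poll queues never consume an
 * interrupt vector.
 */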
2070
2071 static void nvme_disable_io_queues(struct nvme_dev *dev)
2072 {
2073         if (__nvme_disable_io_queues(dev, nvme_admin_delete_sq))
2074                 __nvme_disable_io_queues(dev, nvme_admin_delete_cq);
2075 }
2076
2077 static unsigned int nvme_max_io_queues(struct nvme_dev *dev)
2078 {
2079         return num_possible_cpus() + dev->nr_write_queues + dev->nr_poll_queues;
2080 }
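
/*
 * e.g. a 16-CPU system with write_queues=4 and poll_queues=2 allows for up
 * to 16 + 4 + 2 = 22 I/O queues.
 */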
2081
2082 static int nvme_setup_io_queues(struct nvme_dev *dev)
2083 {
2084         struct nvme_queue *adminq = &dev->queues[0];
2085         struct pci_dev *pdev = to_pci_dev(dev->dev);
2086         unsigned int nr_io_queues;
2087         unsigned long size;
2088         int result;
2089
2090         /*
2091          * Sample the module parameters once at reset time so that we have
2092          * stable values to work with.
2093          */
2094         dev->nr_write_queues = write_queues;
2095         dev->nr_poll_queues = poll_queues;
2096
2097         /*
2098          * If tags are shared with the admin queue (Apple bug), then
2099          * make sure we only use one IO queue.
2100          */
2101         if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
2102                 nr_io_queues = 1;
2103         else
2104                 nr_io_queues = min(nvme_max_io_queues(dev),
2105                                    dev->nr_allocated_queues - 1);
2106
2107         result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
2108         if (result < 0)
2109                 return result;
2110
2111         if (nr_io_queues == 0)
2112                 return 0;
2113
2114         clear_bit(NVMEQ_ENABLED, &adminq->flags);
2115
2116         if (dev->cmb_use_sqes) {
2117                 result = nvme_cmb_qdepth(dev, nr_io_queues,
2118                                 sizeof(struct nvme_command));
2119                 if (result > 0)
2120                         dev->q_depth = result;
2121                 else
2122                         dev->cmb_use_sqes = false;
2123         }
2124
2125         do {
2126                 size = db_bar_size(dev, nr_io_queues);
2127                 result = nvme_remap_bar(dev, size);
2128                 if (!result)
2129                         break;
2130                 if (!--nr_io_queues)
2131                         return -ENOMEM;
2132         } while (1);
2133         adminq->q_db = dev->dbs;
2134
2135  retry:
2136         /* Deregister the admin queue's interrupt */
2137         pci_free_irq(pdev, 0, adminq);
2138
2139         /*
2140          * If we enabled MSI-X early because INTx is not supported, disable it
2141          * again before setting up the full range we need.
2142          */
2143         pci_free_irq_vectors(pdev);
2144
2145         result = nvme_setup_irqs(dev, nr_io_queues);
2146         if (result <= 0)
2147                 return -EIO;
2148
2149         dev->num_vecs = result;
2150         result = max(result - 1, 1);
2151         dev->max_qid = result + dev->io_queues[HCTX_TYPE_POLL];
2152
2153         /*
2154          * Should investigate if there's a performance win from allocating
2155          * more queues than interrupt vectors; it might allow the submission
2156          * path to scale better, even if the receive path is limited by the
2157          * number of interrupts.
2158          */
2159         result = queue_request_irq(adminq);
2160         if (result)
2161                 return result;
2162         set_bit(NVMEQ_ENABLED, &adminq->flags);
2163
2164         result = nvme_create_io_queues(dev);
2165         if (result || dev->online_queues < 2)
2166                 return result;
2167
2168         if (dev->online_queues - 1 < dev->max_qid) {
2169                 nr_io_queues = dev->online_queues - 1;
2170                 nvme_disable_io_queues(dev);
2171                 nvme_suspend_io_queues(dev);
2172                 goto retry;
2173         }
2174         dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n",
2175                                         dev->io_queues[HCTX_TYPE_DEFAULT],
2176                                         dev->io_queues[HCTX_TYPE_READ],
2177                                         dev->io_queues[HCTX_TYPE_POLL]);
2178         return 0;
2179 }
2180
2181 static void nvme_del_queue_end(struct request *req, blk_status_t error)
2182 {
2183         struct nvme_queue *nvmeq = req->end_io_data;
2184
2185         blk_mq_free_request(req);
2186         complete(&nvmeq->delete_done);
2187 }
2188
2189 static void nvme_del_cq_end(struct request *req, blk_status_t error)
2190 {
2191         struct nvme_queue *nvmeq = req->end_io_data;
2192
2193         if (error)
2194                 set_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags);
2195
2196         nvme_del_queue_end(req, error);
2197 }
2198
2199 static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
2200 {
2201         struct request_queue *q = nvmeq->dev->ctrl.admin_q;
2202         struct request *req;
2203         struct nvme_command cmd;
2204
2205         memset(&cmd, 0, sizeof(cmd));
2206         cmd.delete_queue.opcode = opcode;
2207         cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid);
2208
2209         req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
2210         if (IS_ERR(req))
2211                 return PTR_ERR(req);
2212
2213         req->timeout = ADMIN_TIMEOUT;
2214         req->end_io_data = nvmeq;
2215
2216         init_completion(&nvmeq->delete_done);
2217         blk_execute_rq_nowait(q, NULL, req, false,
2218                         opcode == nvme_admin_delete_cq ?
2219                                 nvme_del_cq_end : nvme_del_queue_end);
2220         return 0;
2221 }
2222
2223 static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode)
2224 {
2225         int nr_queues = dev->online_queues - 1, sent = 0;
2226         unsigned long timeout;
2227
2228  retry:
2229         timeout = ADMIN_TIMEOUT;
2230         while (nr_queues > 0) {
2231                 if (nvme_delete_queue(&dev->queues[nr_queues], opcode))
2232                         break;
2233                 nr_queues--;
2234                 sent++;
2235         }
2236         while (sent) {
2237                 struct nvme_queue *nvmeq = &dev->queues[nr_queues + sent];
2238
2239                 timeout = wait_for_completion_io_timeout(&nvmeq->delete_done,
2240                                 timeout);
2241                 if (timeout == 0)
2242                         return false;
2243
2244                 sent--;
2245                 if (nr_queues)
2246                         goto retry;
2247         }
2248         return true;
2249 }
2250
2251 static void nvme_dev_add(struct nvme_dev *dev)
2252 {
2253         int ret;
2254
2255         if (!dev->ctrl.tagset) {
2256                 dev->tagset.ops = &nvme_mq_ops;
2257                 dev->tagset.nr_hw_queues = dev->online_queues - 1;
2258                 dev->tagset.nr_maps = 2; /* default + read */
2259                 if (dev->io_queues[HCTX_TYPE_POLL])
2260                         dev->tagset.nr_maps++;
2261                 dev->tagset.timeout = NVME_IO_TIMEOUT;
2262                 dev->tagset.numa_node = dev->ctrl.numa_node;
2263                 dev->tagset.queue_depth = min_t(unsigned int, dev->q_depth,
2264                                                 BLK_MQ_MAX_DEPTH) - 1;
2265                 dev->tagset.cmd_size = sizeof(struct nvme_iod);
2266                 dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
2267                 dev->tagset.driver_data = dev;
2268
2269                 /*
2270                  * Some Apple controllers require tags to be unique
2271                  * across the admin and IO queues, so reserve the first 32
2272                  * tags of the IO queue.
2273                  */
2274                 if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
2275                         dev->tagset.reserved_tags = NVME_AQ_DEPTH;
2276
2277                 ret = blk_mq_alloc_tag_set(&dev->tagset);
2278                 if (ret) {
2279                         dev_warn(dev->ctrl.device,
2280                                 "IO queues tagset allocation failed %d\n", ret);
2281                         return;
2282                 }
2283                 dev->ctrl.tagset = &dev->tagset;
2284         } else {
2285                 blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
2286
2287                 /* Free previously allocated queues that are no longer usable */
2288                 nvme_free_queues(dev, dev->online_queues);
2289         }
2290
2291         nvme_dbbuf_set(dev);
2292 }
2293
2294 static int nvme_pci_enable(struct nvme_dev *dev)
2295 {
2296         int result = -ENOMEM;
2297         struct pci_dev *pdev = to_pci_dev(dev->dev);
2298
2299         if (pci_enable_device_mem(pdev))
2300                 return result;
2301
2302         pci_set_master(pdev);
2303
2304         if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)))
2305                 goto disable;
2306
2307         if (readl(dev->bar + NVME_REG_CSTS) == -1) {
2308                 result = -ENODEV;
2309                 goto disable;
2310         }
2311
2312         /*
2313          * Some devices and/or platforms don't advertise or work with INTx
2314          * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll
2315          * adjust this later.
2316          */
2317         result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
2318         if (result < 0)
2319                 return result;
2320
2321         dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
2322
2323         dev->q_depth = min_t(u16, NVME_CAP_MQES(dev->ctrl.cap) + 1,
2324                                 io_queue_depth);
2325         dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */
2326         dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap);
2327         dev->dbs = dev->bar + 4096;
2328
2329         /*
2330          * Some Apple controllers require a non-standard SQE size.
2331          * Interestingly they also seem to ignore the CC:IOSQES register
2332          * so we don't bother updating it here.
2333          */
2334         if (dev->ctrl.quirks & NVME_QUIRK_128_BYTES_SQES)
2335                 dev->io_sqes = 7;
2336         else
2337                 dev->io_sqes = NVME_NVM_IOSQES;
2338
2339         /*
2340          * Temporary fix for the Apple controller found in the MacBook8,1 and
2341          * some MacBook7,1 to avoid controller resets and data loss.
2342          */
2343         if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) {
2344                 dev->q_depth = 2;
2345                 dev_warn(dev->ctrl.device, "detected Apple NVMe controller, "
2346                         "set queue depth=%u to work around controller resets\n",
2347                         dev->q_depth);
2348         } else if (pdev->vendor == PCI_VENDOR_ID_SAMSUNG &&
2349                    (pdev->device == 0xa821 || pdev->device == 0xa822) &&
2350                    NVME_CAP_MQES(dev->ctrl.cap) == 0) {
2351                 dev->q_depth = 64;
2352                 dev_err(dev->ctrl.device, "detected PM1725 NVMe controller, "
2353                         "set queue depth=%u\n", dev->q_depth);
2354         }
2355
2356         /*
2357          * Controllers with the shared tags quirk need the IO queue to be
2358          * big enough so that we get 32 tags for the admin queue.
2359          */
2360         if ((dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) &&
2361             (dev->q_depth < (NVME_AQ_DEPTH + 2))) {
2362                 dev->q_depth = NVME_AQ_DEPTH + 2;
2363                 dev_warn(dev->ctrl.device, "IO queue depth clamped to %d\n",
2364                          dev->q_depth);
2365         }
2366
2367
2368         nvme_map_cmb(dev);
2369
2370         pci_enable_pcie_error_reporting(pdev);
2371         pci_save_state(pdev);
2372         return 0;
2373
2374  disable:
2375         pci_disable_device(pdev);
2376         return result;
2377 }
2378
2379 static void nvme_dev_unmap(struct nvme_dev *dev)
2380 {
2381         if (dev->bar)
2382                 iounmap(dev->bar);
2383         pci_release_mem_regions(to_pci_dev(dev->dev));
2384 }
2385
2386 static void nvme_pci_disable(struct nvme_dev *dev)
2387 {
2388         struct pci_dev *pdev = to_pci_dev(dev->dev);
2389
2390         pci_free_irq_vectors(pdev);
2391
2392         if (pci_is_enabled(pdev)) {
2393                 pci_disable_pcie_error_reporting(pdev);
2394                 pci_disable_device(pdev);
2395         }
2396 }
2397
2398 static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
2399 {
2400         bool dead = true, freeze = false;
2401         struct pci_dev *pdev = to_pci_dev(dev->dev);
2402
2403         mutex_lock(&dev->shutdown_lock);
2404         if (pci_is_enabled(pdev)) {
2405                 u32 csts = readl(dev->bar + NVME_REG_CSTS);
2406
2407                 if (dev->ctrl.state == NVME_CTRL_LIVE ||
2408                     dev->ctrl.state == NVME_CTRL_RESETTING) {
2409                         freeze = true;
2410                         nvme_start_freeze(&dev->ctrl);
2411                 }
2412                 dead = !!((csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY) ||
2413                         pdev->error_state != pci_channel_io_normal);
2414         }
2415
2416         /*
2417          * Give the controller a chance to complete all entered requests if
2418          * doing a safe shutdown.
2419          */
2420         if (!dead && shutdown && freeze)
2421                 nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT);
2422
2423         nvme_stop_queues(&dev->ctrl);
2424
2425         if (!dead && dev->ctrl.queue_count > 0) {
2426                 nvme_disable_io_queues(dev);
2427                 nvme_disable_admin_queue(dev, shutdown);
2428         }
2429         nvme_suspend_io_queues(dev);
2430         nvme_suspend_queue(&dev->queues[0]);
2431         nvme_pci_disable(dev);
2432         nvme_reap_pending_cqes(dev);
2433
2434         blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);
2435         blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request, &dev->ctrl);
2436         blk_mq_tagset_wait_completed_request(&dev->tagset);
2437         blk_mq_tagset_wait_completed_request(&dev->admin_tagset);
2438
2439         /*
2440          * The driver will not be starting up queues again if shutting down, so
2441          * we must flush all entered requests to their failed completion to avoid
2442          * deadlocking the blk-mq hot-cpu notifier.
2443          */
2444         if (shutdown) {
2445                 nvme_start_queues(&dev->ctrl);
2446                 if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q))
2447                         blk_mq_unquiesce_queue(dev->ctrl.admin_q);
2448         }
2449         mutex_unlock(&dev->shutdown_lock);
2450 }
2451
2452 static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown)
2453 {
2454         if (!nvme_wait_reset(&dev->ctrl))
2455                 return -EBUSY;
2456         nvme_dev_disable(dev, shutdown);
2457         return 0;
2458 }
2459
2460 static int nvme_setup_prp_pools(struct nvme_dev *dev)
2461 {
2462         dev->prp_page_pool = dma_pool_create("prp list page", dev->dev,
2463                                                 PAGE_SIZE, PAGE_SIZE, 0);
2464         if (!dev->prp_page_pool)
2465                 return -ENOMEM;
2466
2467         /* Optimisation for I/Os between 4k and 128k */
2468         dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev,
2469                                                 256, 256, 0);
2470         if (!dev->prp_small_pool) {
2471                 dma_pool_destroy(dev->prp_page_pool);
2472                 return -ENOMEM;
2473         }
2474         return 0;
2475 }
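
/*
 * Sizing note for the pools above: a PRP entry is 8 bytes, so the 256 byte
 * pool holds a 32-entry PRP list, enough to map on the order of
 * 32 * 4 KiB = 128 KiB with 4 KiB memory pages - hence the "4k and 128k"
 * optimisation comment.
 */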
2476
2477 static void nvme_release_prp_pools(struct nvme_dev *dev)
2478 {
2479         dma_pool_destroy(dev->prp_page_pool);
2480         dma_pool_destroy(dev->prp_small_pool);
2481 }
2482
2483 static void nvme_free_tagset(struct nvme_dev *dev)
2484 {
2485         if (dev->tagset.tags)
2486                 blk_mq_free_tag_set(&dev->tagset);
2487         dev->ctrl.tagset = NULL;
2488 }
2489
2490 static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
2491 {
2492         struct nvme_dev *dev = to_nvme_dev(ctrl);
2493
2494         nvme_dbbuf_dma_free(dev);
2495         nvme_free_tagset(dev);
2496         if (dev->ctrl.admin_q)
2497                 blk_put_queue(dev->ctrl.admin_q);
2498         free_opal_dev(dev->ctrl.opal_dev);
2499         mempool_destroy(dev->iod_mempool);
2500         put_device(dev->dev);
2501         kfree(dev->queues);
2502         kfree(dev);
2503 }
2504
2505 static void nvme_remove_dead_ctrl(struct nvme_dev *dev)
2506 {
2507         /*
2508          * Set state to deleting now to avoid blocking nvme_wait_reset(), which
2509          * may be holding this pci_dev's device lock.
2510          */
2511         nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
2512         nvme_get_ctrl(&dev->ctrl);
2513         nvme_dev_disable(dev, false);
2514         nvme_kill_queues(&dev->ctrl);
2515         if (!queue_work(nvme_wq, &dev->remove_work))
2516                 nvme_put_ctrl(&dev->ctrl);
2517 }
2518
2519 static void nvme_reset_work(struct work_struct *work)
2520 {
2521         struct nvme_dev *dev =
2522                 container_of(work, struct nvme_dev, ctrl.reset_work);
2523         bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
2524         int result;
2525
2526         if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING)) {
2527                 result = -ENODEV;
2528                 goto out;
2529         }
2530
2531         /*
2532          * If we're called to reset a live controller, first shut it down before
2533          * moving on.
2534          */
2535         if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
2536                 nvme_dev_disable(dev, false);
2537         nvme_sync_queues(&dev->ctrl);
2538
2539         mutex_lock(&dev->shutdown_lock);
2540         result = nvme_pci_enable(dev);
2541         if (result)
2542                 goto out_unlock;
2543
2544         result = nvme_pci_configure_admin_queue(dev);
2545         if (result)
2546                 goto out_unlock;
2547
2548         result = nvme_alloc_admin_tags(dev);
2549         if (result)
2550                 goto out_unlock;
2551
2552         /*
2553          * Limit the max command size to prevent iod->sg allocations going
2554          * over a single page.
2555          */
2556         dev->ctrl.max_hw_sectors = min_t(u32,
2557                 NVME_MAX_KB_SZ << 1, dma_max_mapping_size(dev->dev) >> 9);
2558         dev->ctrl.max_segments = NVME_MAX_SEGS;
2559
2560         /*
2561          * Don't limit the IOMMU merged segment size.
2562          */
2563         dma_set_max_seg_size(dev->dev, 0xffffffff);
2564
2565         mutex_unlock(&dev->shutdown_lock);
2566
2567         /*
2568          * Mark the controller as CONNECTING (a state introduced by the
2569          * nvme-fc/rdma transports) while the initialization procedure runs.
2570          */
2571         if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
2572                 dev_warn(dev->ctrl.device,
2573                         "failed to mark controller CONNECTING\n");
2574                 result = -EBUSY;
2575                 goto out;
2576         }
2577
2578         /*
2579          * We do not support an SGL for metadata (yet), so we are limited to a
2580          * single integrity segment for the separate metadata pointer.
2581          */
2582         dev->ctrl.max_integrity_segments = 1;
2583
2584         result = nvme_init_identify(&dev->ctrl);
2585         if (result)
2586                 goto out;
2587
2588         if (dev->ctrl.oacs & NVME_CTRL_OACS_SEC_SUPP) {
2589                 if (!dev->ctrl.opal_dev)
2590                         dev->ctrl.opal_dev =
2591                                 init_opal_dev(&dev->ctrl, &nvme_sec_submit);
2592                 else if (was_suspend)
2593                         opal_unlock_from_suspend(dev->ctrl.opal_dev);
2594         } else {
2595                 free_opal_dev(dev->ctrl.opal_dev);
2596                 dev->ctrl.opal_dev = NULL;
2597         }
2598
2599         if (dev->ctrl.oacs & NVME_CTRL_OACS_DBBUF_SUPP) {
2600                 result = nvme_dbbuf_dma_alloc(dev);
2601                 if (result)
2602                         dev_warn(dev->dev,
2603                                  "unable to allocate dma for dbbuf\n");
2604         }
2605
2606         if (dev->ctrl.hmpre) {
2607                 result = nvme_setup_host_mem(dev);
2608                 if (result < 0)
2609                         goto out;
2610         }
2611
2612         result = nvme_setup_io_queues(dev);
2613         if (result)
2614                 goto out;
2615
2616         /*
2617          * Keep the controller around but remove all namespaces if we don't have
2618          * any working I/O queue.
2619          */
2620         if (dev->online_queues < 2) {
2621                 dev_warn(dev->ctrl.device, "IO queues not created\n");
2622                 nvme_kill_queues(&dev->ctrl);
2623                 nvme_remove_namespaces(&dev->ctrl);
2624                 nvme_free_tagset(dev);
2625         } else {
2626                 nvme_start_queues(&dev->ctrl);
2627                 nvme_wait_freeze(&dev->ctrl);
2628                 nvme_dev_add(dev);
2629                 nvme_unfreeze(&dev->ctrl);
2630         }
2631
2632         /*
2633          * If only the admin queue is live, keep it to do further investigation or
2634          * recovery.
2635          */
2636         if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) {
2637                 dev_warn(dev->ctrl.device,
2638                         "failed to mark controller live state\n");
2639                 result = -ENODEV;
2640                 goto out;
2641         }
2642
2643         nvme_start_ctrl(&dev->ctrl);
2644         return;
2645
2646  out_unlock:
2647         mutex_unlock(&dev->shutdown_lock);
2648  out:
2649         if (result)
2650                 dev_warn(dev->ctrl.device,
2651                          "Removing after probe failure status: %d\n", result);
2652         nvme_remove_dead_ctrl(dev);
2653 }
2654
2655 static void nvme_remove_dead_ctrl_work(struct work_struct *work)
2656 {
2657         struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work);
2658         struct pci_dev *pdev = to_pci_dev(dev->dev);
2659
2660         if (pci_get_drvdata(pdev))
2661                 device_release_driver(&pdev->dev);
2662         nvme_put_ctrl(&dev->ctrl);
2663 }
2664
2665 static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
2666 {
2667         *val = readl(to_nvme_dev(ctrl)->bar + off);
2668         return 0;
2669 }
2670
2671 static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
2672 {
2673         writel(val, to_nvme_dev(ctrl)->bar + off);
2674         return 0;
2675 }
2676
2677 static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
2678 {
2679         *val = lo_hi_readq(to_nvme_dev(ctrl)->bar + off);
2680         return 0;
2681 }
2682
2683 static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
2684 {
2685         struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev);
2686
2687         return snprintf(buf, size, "%s\n", dev_name(&pdev->dev));
2688 }
2689
2690 static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
2691         .name                   = "pcie",
2692         .module                 = THIS_MODULE,
2693         .flags                  = NVME_F_METADATA_SUPPORTED |
2694                                   NVME_F_PCI_P2PDMA,
2695         .reg_read32             = nvme_pci_reg_read32,
2696         .reg_write32            = nvme_pci_reg_write32,
2697         .reg_read64             = nvme_pci_reg_read64,
2698         .free_ctrl              = nvme_pci_free_ctrl,
2699         .submit_async_event     = nvme_pci_submit_async_event,
2700         .get_address            = nvme_pci_get_address,
2701 };
2702
2703 static int nvme_dev_map(struct nvme_dev *dev)
2704 {
2705         struct pci_dev *pdev = to_pci_dev(dev->dev);
2706
2707         if (pci_request_mem_regions(pdev, "nvme"))
2708                 return -ENODEV;
2709
2710         if (nvme_remap_bar(dev, NVME_REG_DBS + 4096))
2711                 goto release;
2712
2713         return 0;
2714   release:
2715         pci_release_mem_regions(pdev);
2716         return -ENODEV;
2717 }
2718
2719 static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
2720 {
2721         if (pdev->vendor == 0x144d && pdev->device == 0xa802) {
2722                 /*
2723                  * Several Samsung devices seem to drop off the PCIe bus
2724                  * randomly when APST is on and uses the deepest sleep state.
2725                  * This has been observed on a Samsung "SM951 NVMe SAMSUNG
2726                  * 256GB", a "PM951 NVMe SAMSUNG 512GB", and a "Samsung SSD
2727                  * 950 PRO 256GB", but it seems to be restricted to two Dell
2728                  * laptops.
2729                  */
2730                 if (dmi_match(DMI_SYS_VENDOR, "Dell Inc.") &&
2731                     (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") ||
2732                      dmi_match(DMI_PRODUCT_NAME, "Precision 5510")))
2733                         return NVME_QUIRK_NO_DEEPEST_PS;
2734         } else if (pdev->vendor == 0x144d && pdev->device == 0xa804) {
2735                 /*
2736                  * Samsung SSD 960 EVO drops off the PCIe bus after system
2737                  * suspend on a Ryzen board, ASUS PRIME B350M-A, as well as
2738                  * within a few minutes after bootup on a Coffee Lake board -
2739                  * ASUS PRIME Z370-A.
2740                  */
2741                 if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") &&
2742                     (dmi_match(DMI_BOARD_NAME, "PRIME B350M-A") ||
2743                      dmi_match(DMI_BOARD_NAME, "PRIME Z370-A")))
2744                         return NVME_QUIRK_NO_APST;
2745         } else if ((pdev->vendor == 0x144d && (pdev->device == 0xa801 ||
2746                     pdev->device == 0xa808 || pdev->device == 0xa809)) ||
2747                    (pdev->vendor == 0x1e0f && pdev->device == 0x0001)) {
2748                 /*
2749                  * Force the use of host managed nvme power settings for
2750                  * lowest idle power with quick resume latency on
2751                  * Samsung and Toshiba SSDs, based on suspend behavior
2752                  * observed on a Coffee Lake board (LENOVO C640).
2753                  */
2754                 if ((dmi_match(DMI_BOARD_VENDOR, "LENOVO")) &&
2755                      dmi_match(DMI_BOARD_NAME, "LNVNB161216"))
2756                         return NVME_QUIRK_SIMPLE_SUSPEND;
2757         }
2758
2759         return 0;
2760 }
2761
2762 #ifdef CONFIG_ACPI
2763 static bool nvme_acpi_storage_d3(struct pci_dev *dev)
2764 {
2765         struct acpi_device *adev;
2766         struct pci_dev *root;
2767         acpi_handle handle;
2768         acpi_status status;
2769         u8 val;
2770
2771         /*
2772          * Look for the _DSD property specifying that the storage device on the port
2773          * must use D3 to support deep platform power savings during
2774          * suspend-to-idle.
2775          */
2776         root = pcie_find_root_port(dev);
2777         if (!root)
2778                 return false;
2779
2780         adev = ACPI_COMPANION(&root->dev);
2781         if (!adev)
2782                 return false;
2783
2784         /*
2785          * The property is defined in the PXSX device for South complex ports
2786          * and in the PEGP device for North complex ports.
2787          */
2788         status = acpi_get_handle(adev->handle, "PXSX", &handle);
2789         if (ACPI_FAILURE(status)) {
2790                 status = acpi_get_handle(adev->handle, "PEGP", &handle);
2791                 if (ACPI_FAILURE(status))
2792                         return false;
2793         }
2794
2795         if (acpi_bus_get_device(handle, &adev))
2796                 return false;
2797
2798         if (fwnode_property_read_u8(acpi_fwnode_handle(adev), "StorageD3Enable",
2799                         &val))
2800                 return false;
2801         return val == 1;
2802 }
2803 #else
2804 static inline bool nvme_acpi_storage_d3(struct pci_dev *dev)
2805 {
2806         return false;
2807 }
2808 #endif /* CONFIG_ACPI */
2809
2810 static void nvme_async_probe(void *data, async_cookie_t cookie)
2811 {
2812         struct nvme_dev *dev = data;
2813
2814         flush_work(&dev->ctrl.reset_work);
2815         flush_work(&dev->ctrl.scan_work);
2816         nvme_put_ctrl(&dev->ctrl);
2817 }
2818
2819 static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2820 {
2821         int node, result = -ENOMEM;
2822         struct nvme_dev *dev;
2823         unsigned long quirks = id->driver_data;
2824         size_t alloc_size;
2825
2826         node = dev_to_node(&pdev->dev);
2827         if (node == NUMA_NO_NODE)
2828                 set_dev_node(&pdev->dev, first_memory_node);
2829
2830         dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
2831         if (!dev)
2832                 return -ENOMEM;
2833
2834         dev->nr_write_queues = write_queues;
2835         dev->nr_poll_queues = poll_queues;
2836         dev->nr_allocated_queues = nvme_max_io_queues(dev) + 1;
2837         dev->queues = kcalloc_node(dev->nr_allocated_queues,
2838                         sizeof(struct nvme_queue), GFP_KERNEL, node);
2839         if (!dev->queues)
2840                 goto free;
2841
2842         dev->dev = get_device(&pdev->dev);
2843         pci_set_drvdata(pdev, dev);
2844
2845         result = nvme_dev_map(dev);
2846         if (result)
2847                 goto put_pci;
2848
2849         INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work);
2850         INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
2851         mutex_init(&dev->shutdown_lock);
2852
2853         result = nvme_setup_prp_pools(dev);
2854         if (result)
2855                 goto unmap;
2856
2857         quirks |= check_vendor_combination_bug(pdev);
2858
2859         if (!noacpi && nvme_acpi_storage_d3(pdev)) {
2860                 /*
2861                  * Some systems use a BIOS workaround to ask for D3 on
2862                  * platforms that support kernel managed suspend.
2863                  */
2864                 dev_info(&pdev->dev,
2865                          "platform quirk: setting simple suspend\n");
2866                 quirks |= NVME_QUIRK_SIMPLE_SUSPEND;
2867         }
2868
2869         /*
2870          * Double check that our mempool alloc size will cover the biggest
2871          * command we support.
2872          */
2873         alloc_size = nvme_pci_iod_alloc_size();
2874         WARN_ON_ONCE(alloc_size > PAGE_SIZE);
2875
2876         dev->iod_mempool = mempool_create_node(1, mempool_kmalloc,
2877                                                 mempool_kfree,
2878                                                 (void *) alloc_size,
2879                                                 GFP_KERNEL, node);
2880         if (!dev->iod_mempool) {
2881                 result = -ENOMEM;
2882                 goto release_pools;
2883         }
2884
2885         result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
2886                         quirks);
2887         if (result)
2888                 goto release_mempool;
2889
2890         dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
2891
2892         nvme_reset_ctrl(&dev->ctrl);
2893         async_schedule(nvme_async_probe, dev);
2894
2895         return 0;
2896
2897  release_mempool:
2898         mempool_destroy(dev->iod_mempool);
2899  release_pools:
2900         nvme_release_prp_pools(dev);
2901  unmap:
2902         nvme_dev_unmap(dev);
2903  put_pci:
2904         put_device(dev->dev);
2905  free:
2906         kfree(dev->queues);
2907         kfree(dev);
2908         return result;
2909 }
2910
2911 static void nvme_reset_prepare(struct pci_dev *pdev)
2912 {
2913         struct nvme_dev *dev = pci_get_drvdata(pdev);
2914
2915         /*
2916          * We don't need to check the return value from waiting for the reset
2917          * state as pci_dev device lock is held, making it impossible to race
2918          * with ->remove().
2919          */
2920         nvme_disable_prepare_reset(dev, false);
2921         nvme_sync_queues(&dev->ctrl);
2922 }
2923
2924 static void nvme_reset_done(struct pci_dev *pdev)
2925 {
2926         struct nvme_dev *dev = pci_get_drvdata(pdev);
2927
2928         if (!nvme_try_sched_reset(&dev->ctrl))
2929                 flush_work(&dev->ctrl.reset_work);
2930 }
2931
2932 static void nvme_shutdown(struct pci_dev *pdev)
2933 {
2934         struct nvme_dev *dev = pci_get_drvdata(pdev);
2935
2936         nvme_disable_prepare_reset(dev, true);
2937 }
2938
2939 /*
2940  * The driver's remove may be called on a device in a partially initialized
2941  * state. This function must not have any dependencies on the device state in
2942  * order to proceed.
2943  */
2944 static void nvme_remove(struct pci_dev *pdev)
2945 {
2946         struct nvme_dev *dev = pci_get_drvdata(pdev);
2947
2948         nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
2949         pci_set_drvdata(pdev, NULL);
2950
2951         if (!pci_device_is_present(pdev)) {
2952                 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
2953                 nvme_dev_disable(dev, true);
2954                 nvme_dev_remove_admin(dev);
2955         }
2956
2957         flush_work(&dev->ctrl.reset_work);
2958         nvme_stop_ctrl(&dev->ctrl);
2959         nvme_remove_namespaces(&dev->ctrl);
2960         nvme_dev_disable(dev, true);
2961         nvme_release_cmb(dev);
2962         nvme_free_host_mem(dev);
2963         nvme_dev_remove_admin(dev);
2964         nvme_free_queues(dev, 0);
2965         nvme_release_prp_pools(dev);
2966         nvme_dev_unmap(dev);
2967         nvme_uninit_ctrl(&dev->ctrl);
2968 }
2969
2970 #ifdef CONFIG_PM_SLEEP
2971 static int nvme_get_power_state(struct nvme_ctrl *ctrl, u32 *ps)
2972 {
2973         return nvme_get_features(ctrl, NVME_FEAT_POWER_MGMT, 0, NULL, 0, ps);
2974 }
2975
2976 static int nvme_set_power_state(struct nvme_ctrl *ctrl, u32 ps)
2977 {
2978         return nvme_set_features(ctrl, NVME_FEAT_POWER_MGMT, ps, NULL, 0, NULL);
2979 }
2980
2981 static int nvme_resume(struct device *dev)
2982 {
2983         struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev));
2984         struct nvme_ctrl *ctrl = &ndev->ctrl;
2985
2986         if (ndev->last_ps == U32_MAX ||
2987             nvme_set_power_state(ctrl, ndev->last_ps) != 0)
2988                 return nvme_try_sched_reset(&ndev->ctrl);
2989         return 0;
2990 }
2991
2992 static int nvme_suspend(struct device *dev)
2993 {
2994         struct pci_dev *pdev = to_pci_dev(dev);
2995         struct nvme_dev *ndev = pci_get_drvdata(pdev);
2996         struct nvme_ctrl *ctrl = &ndev->ctrl;
2997         int ret = -EBUSY;
2998
2999         ndev->last_ps = U32_MAX;
3000
3001         /*
3002          * The platform does not remove power for a kernel managed suspend so
3003          * use host managed nvme power settings for lowest idle power if
3004          * possible. This should have quicker resume latency than a full device
3005          * shutdown.  But if the firmware is involved after the suspend or the
3006          * device does not support any non-default power states, shut down the
3007          * device fully.
3008          *
3009          * If ASPM is not enabled for the device, shut down the device and allow
3010          * the PCI bus layer to put it into D3 in order to take the PCIe link
3011          * down, so as to allow the platform to achieve its minimum low-power
3012          * state (which may not be possible if the link is up).
3013          *
3014          * If a host memory buffer is enabled, shut down the device as the NVMe
3015          * specification allows the device to access the host memory buffer in
3016          * host DRAM from all power states, but hosts will fail access to DRAM
3017          * during S3.
3018          */
3019         if (pm_suspend_via_firmware() || !ctrl->npss ||
3020             !pcie_aspm_enabled(pdev) ||
3021             ndev->nr_host_mem_descs ||
3022             (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND))
3023                 return nvme_disable_prepare_reset(ndev, true);
3024
3025         nvme_start_freeze(ctrl);
3026         nvme_wait_freeze(ctrl);
3027         nvme_sync_queues(ctrl);
3028
3029         if (ctrl->state != NVME_CTRL_LIVE)
3030                 goto unfreeze;
3031
3032         ret = nvme_get_power_state(ctrl, &ndev->last_ps);
3033         if (ret < 0)
3034                 goto unfreeze;
3035
3036         /*
3037          * A saved PCI state prevents the PCI PM core from generically
3038          * controlling the device's power. Since NVMe protocol specific power
3039          * settings are used here, the PCI layer must not interfere.
3040          */
3041         pci_save_state(pdev);
3042
3043         ret = nvme_set_power_state(ctrl, ctrl->npss);
3044         if (ret < 0)
3045                 goto unfreeze;
3046
3047         if (ret) {
3048                 /* discard the saved state */
3049                 pci_load_saved_state(pdev, NULL);
3050
3051                 /*
3052                  * Clearing npss forces a controller reset on resume. The
3053                  * correct value will be rediscovered then.
3054                  */
3055                 ret = nvme_disable_prepare_reset(ndev, true);
3056                 ctrl->npss = 0;
3057         }
3058 unfreeze:
3059         nvme_unfreeze(ctrl);
3060         return ret;
3061 }
3062
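/*
 * "Simple" suspend/resume: fully shut down the controller on the way down and
 * schedule a controller reset on the way back up.  Used below for the
 * hibernation phases (freeze/thaw/poweroff/restore) of nvme_dev_pm_ops.
 */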
3063 static int nvme_simple_suspend(struct device *dev)
3064 {
3065         struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev));
3066
3067         return nvme_disable_prepare_reset(ndev, true);
3068 }
3069
3070 static int nvme_simple_resume(struct device *dev)
3071 {
3072         struct pci_dev *pdev = to_pci_dev(dev);
3073         struct nvme_dev *ndev = pci_get_drvdata(pdev);
3074
3075         return nvme_try_sched_reset(&ndev->ctrl);
3076 }
3077
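/*
 * System sleep transitions use the host managed nvme_suspend()/nvme_resume()
 * pair; the hibernation phases always take the simple shutdown/reset path.
 */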
3078 static const struct dev_pm_ops nvme_dev_pm_ops = {
3079         .suspend        = nvme_suspend,
3080         .resume         = nvme_resume,
3081         .freeze         = nvme_simple_suspend,
3082         .thaw           = nvme_simple_resume,
3083         .poweroff       = nvme_simple_suspend,
3084         .restore        = nvme_simple_resume,
3085 };
3086 #endif /* CONFIG_PM_SLEEP */
3087
3088 static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev,
3089                                                 pci_channel_state_t state)
3090 {
3091         struct nvme_dev *dev = pci_get_drvdata(pdev);
3092
3093         /*
3094          * A frozen channel requires a reset. When detected, this method shuts
3095          * down the controller to quiesce it. The controller will be restarted
3096          * after the slot reset through the driver's slot_reset callback.
3097          */
3098         switch (state) {
3099         case pci_channel_io_normal:
3100                 return PCI_ERS_RESULT_CAN_RECOVER;
3101         case pci_channel_io_frozen:
3102                 dev_warn(dev->ctrl.device,
3103                         "frozen state error detected, reset controller\n");
3104                 nvme_dev_disable(dev, false);
3105                 return PCI_ERS_RESULT_NEED_RESET;
3106         case pci_channel_io_perm_failure:
3107                 dev_warn(dev->ctrl.device,
3108                         "failure state error detected, request disconnect\n");
3109                 return PCI_ERS_RESULT_DISCONNECT;
3110         }
3111         return PCI_ERS_RESULT_NEED_RESET;
3112 }
3113
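/*
 * The PCI core has reset the slot: restore the saved config space and
 * schedule a controller reset to bring the device back online.
 */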
3114 static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
3115 {
3116         struct nvme_dev *dev = pci_get_drvdata(pdev);
3117
3118         dev_info(dev->ctrl.device, "restart after slot reset\n");
3119         pci_restore_state(pdev);
3120         nvme_reset_ctrl(&dev->ctrl);
3121         return PCI_ERS_RESULT_RECOVERED;
3122 }
3123
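/* Error recovery is complete; wait for the reset scheduled above to finish. */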
3124 static void nvme_error_resume(struct pci_dev *pdev)
3125 {
3126         struct nvme_dev *dev = pci_get_drvdata(pdev);
3127
3128         flush_work(&dev->ctrl.reset_work);
3129 }
3130
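/* PCI error recovery and reset callbacks. */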
3131 static const struct pci_error_handlers nvme_err_handler = {
3132         .error_detected = nvme_error_detected,
3133         .slot_reset     = nvme_slot_reset,
3134         .resume         = nvme_error_resume,
3135         .reset_prepare  = nvme_reset_prepare,
3136         .reset_done     = nvme_reset_done,
3137 };
3138
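/*
 * Devices handled by this driver.  Most controllers bind through the generic
 * NVMe class entry; the explicit vendor/device entries mainly exist to attach
 * quirk flags.
 */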
3139 static const struct pci_device_id nvme_id_table[] = {
3140         { PCI_VDEVICE(INTEL, 0x0953),   /* Intel 750/P3500/P3600/P3700 */
3141                 .driver_data = NVME_QUIRK_STRIPE_SIZE |
3142                                 NVME_QUIRK_DEALLOCATE_ZEROES, },
3143         { PCI_VDEVICE(INTEL, 0x0a53),   /* Intel P3520 */
3144                 .driver_data = NVME_QUIRK_STRIPE_SIZE |
3145                                 NVME_QUIRK_DEALLOCATE_ZEROES, },
3146         { PCI_VDEVICE(INTEL, 0x0a54),   /* Intel P4500/P4600 */
3147                 .driver_data = NVME_QUIRK_STRIPE_SIZE |
3148                                 NVME_QUIRK_DEALLOCATE_ZEROES, },
3149         { PCI_VDEVICE(INTEL, 0x0a55),   /* Dell Express Flash P4600 */
3150                 .driver_data = NVME_QUIRK_STRIPE_SIZE |
3151                                 NVME_QUIRK_DEALLOCATE_ZEROES, },
3152         { PCI_VDEVICE(INTEL, 0xf1a5),   /* Intel 600P/P3100 */
3153                 .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
3154                                 NVME_QUIRK_MEDIUM_PRIO_SQ |
3155                                 NVME_QUIRK_NO_TEMP_THRESH_CHANGE },
3156         { PCI_VDEVICE(INTEL, 0xf1a6),   /* Intel 760p/Pro 7600p */
3157                 .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
3158         { PCI_VDEVICE(INTEL, 0x5845),   /* Qemu emulated controller */
3159                 .driver_data = NVME_QUIRK_IDENTIFY_CNS |
3160                                 NVME_QUIRK_DISABLE_WRITE_ZEROES, },
3161         { PCI_DEVICE(0x126f, 0x2263),   /* Silicon Motion unidentified */
3162                 .driver_data = NVME_QUIRK_NO_NS_DESC_LIST, },
3163         { PCI_DEVICE(0x1bb1, 0x0100),   /* Seagate Nytro Flash Storage */
3164                 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
3165         { PCI_DEVICE(0x1c58, 0x0003),   /* HGST adapter */
3166                 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
3167         { PCI_DEVICE(0x1c58, 0x0023),   /* WDC SN200 adapter */
3168                 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
3169         { PCI_DEVICE(0x1c5f, 0x0540),   /* Memblaze Pblaze4 adapter */
3170                 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
3171         { PCI_DEVICE(0x144d, 0xa821),   /* Samsung PM1725 */
3172                 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
3173         { PCI_DEVICE(0x144d, 0xa822),   /* Samsung PM1725a */
3174                 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
3175         { PCI_DEVICE(0x1d1d, 0x1f1f),   /* LightNVM qemu device */
3176                 .driver_data = NVME_QUIRK_LIGHTNVM, },
3177         { PCI_DEVICE(0x1d1d, 0x2807),   /* CNEX WL */
3178                 .driver_data = NVME_QUIRK_LIGHTNVM, },
3179         { PCI_DEVICE(0x1d1d, 0x2601),   /* CNEX Granby */
3180                 .driver_data = NVME_QUIRK_LIGHTNVM, },
3181         { PCI_DEVICE(0x10ec, 0x5762),   /* ADATA SX6000LNP */
3182                 .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
3183         { PCI_DEVICE(0x1cc1, 0x8201),   /* ADATA SX8200PNP 512GB */
3184                 .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
3185                                 NVME_QUIRK_IGNORE_DEV_SUBNQN, },
3186         { PCI_DEVICE(0x1c5c, 0x1504),   /* SK Hynix PC400 */
3187                 .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
3188         { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
3189         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
3190                 .driver_data = NVME_QUIRK_SINGLE_VECTOR },
3191         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
3192         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005),
3193                 .driver_data = NVME_QUIRK_SINGLE_VECTOR |
3194                                 NVME_QUIRK_128_BYTES_SQES |
3195                                 NVME_QUIRK_SHARED_TAGS },
3196         { 0, }
3197 };
3198 MODULE_DEVICE_TABLE(pci, nvme_id_table);
3199
3200 static struct pci_driver nvme_driver = {
3201         .name           = "nvme",
3202         .id_table       = nvme_id_table,
3203         .probe          = nvme_probe,
3204         .remove         = nvme_remove,
3205         .shutdown       = nvme_shutdown,
3206 #ifdef CONFIG_PM_SLEEP
3207         .driver         = {
3208                 .pm     = &nvme_dev_pm_ops,
3209         },
3210 #endif
3211         .sriov_configure = pci_sriov_configure_simple,
3212         .err_handler    = &nvme_err_handler,
3213 };
3214
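/*
 * Verify at build time that the queue management commands this driver submits
 * are exactly one 64-byte submission queue entry each and that enough IRQ
 * affinity sets are available, then register the PCI driver.
 */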
3215 static int __init nvme_init(void)
3216 {
3217         BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
3218         BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
3219         BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
3220         BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2);
3221
3222         return pci_register_driver(&nvme_driver);
3223 }
3224
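/* Unregister the driver and make sure no controller work is still queued. */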
3225 static void __exit nvme_exit(void)
3226 {
3227         pci_unregister_driver(&nvme_driver);
3228         flush_workqueue(nvme_wq);
3229 }
3230
3231 MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
3232 MODULE_LICENSE("GPL");
3233 MODULE_VERSION("1.0");
3234 module_init(nvme_init);
3235 module_exit(nvme_exit);