PCI/switchtec: Separate Gen3 register structures into unions
[linux-2.6-microblaze.git] / drivers / pci / switch / switchtec.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Microsemi Switchtec(tm) PCIe Management Driver
4  * Copyright (c) 2017, Microsemi Corporation
5  */
6
7 #include <linux/switchtec.h>
8 #include <linux/switchtec_ioctl.h>
9
10 #include <linux/interrupt.h>
11 #include <linux/module.h>
12 #include <linux/fs.h>
13 #include <linux/uaccess.h>
14 #include <linux/poll.h>
15 #include <linux/wait.h>
16 #include <linux/io-64-nonatomic-lo-hi.h>
17 #include <linux/nospec.h>
18
19 MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
20 MODULE_VERSION("0.1");
21 MODULE_LICENSE("GPL");
22 MODULE_AUTHOR("Microsemi Corporation");
23
24 static int max_devices = 16;
25 module_param(max_devices, int, 0644);
26 MODULE_PARM_DESC(max_devices, "max number of switchtec device instances");
27
28 static bool use_dma_mrpc = 1;
29 module_param(use_dma_mrpc, bool, 0644);
30 MODULE_PARM_DESC(use_dma_mrpc,
31                  "Enable the use of the DMA MRPC feature");
32
33 static int nirqs = 32;
34 module_param(nirqs, int, 0644);
35 MODULE_PARM_DESC(nirqs, "number of interrupts to allocate (more may be useful for NTB applications)");
36
37 static dev_t switchtec_devt;
38 static DEFINE_IDA(switchtec_minor_ida);
39
40 struct class *switchtec_class;
41 EXPORT_SYMBOL_GPL(switchtec_class);
42
/* Lifecycle states of a single MRPC command issued through a user context */
enum mrpc_state {
	MRPC_IDLE = 0,		/* no command outstanding */
	MRPC_QUEUED,		/* on stdev->mrpc_queue, not yet submitted */
	MRPC_RUNNING,		/* written to hardware, awaiting completion */
	MRPC_DONE,		/* finished; result cached below */
};

/*
 * Per-open-file state: one MRPC command slot plus event bookkeeping.
 * Refcounted (kref) because a queued command holds a reference and may
 * outlive the file descriptor that issued it.
 */
struct switchtec_user {
	struct switchtec_dev *stdev;	/* owning device; holds a device ref */

	enum mrpc_state state;		/* protected by stdev->mrpc_mutex */

	struct completion comp;		/* signalled when the command completes */
	struct kref kref;		/* lifetime of this structure */
	struct list_head list;		/* entry on stdev->mrpc_queue */

	u32 cmd;			/* MRPC command number from userspace */
	u32 status;			/* hardware status of the last command */
	u32 return_code;		/* hardware return value of the last command */
	size_t data_len;		/* valid input bytes in data[] */
	size_t read_len;		/* output bytes the reader asked for */
	unsigned char data[SWITCHTEC_MRPC_PAYLOAD_SIZE];	/* in/out payload */
	int event_cnt;			/* snapshot of stdev->event_cnt for poll() */
};
67
68 static struct switchtec_user *stuser_create(struct switchtec_dev *stdev)
69 {
70         struct switchtec_user *stuser;
71
72         stuser = kzalloc(sizeof(*stuser), GFP_KERNEL);
73         if (!stuser)
74                 return ERR_PTR(-ENOMEM);
75
76         get_device(&stdev->dev);
77         stuser->stdev = stdev;
78         kref_init(&stuser->kref);
79         INIT_LIST_HEAD(&stuser->list);
80         init_completion(&stuser->comp);
81         stuser->event_cnt = atomic_read(&stdev->event_cnt);
82
83         dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);
84
85         return stuser;
86 }
87
88 static void stuser_free(struct kref *kref)
89 {
90         struct switchtec_user *stuser;
91
92         stuser = container_of(kref, struct switchtec_user, kref);
93
94         dev_dbg(&stuser->stdev->dev, "%s: %p\n", __func__, stuser);
95
96         put_device(&stuser->stdev->dev);
97         kfree(stuser);
98 }
99
/* Drop a reference to a user context; frees it on the last put */
static void stuser_put(struct switchtec_user *stuser)
{
	kref_put(&stuser->kref, stuser_free);
}
104
105 static void stuser_set_state(struct switchtec_user *stuser,
106                              enum mrpc_state state)
107 {
108         /* requires the mrpc_mutex to already be held when called */
109
110         const char * const state_names[] = {
111                 [MRPC_IDLE] = "IDLE",
112                 [MRPC_QUEUED] = "QUEUED",
113                 [MRPC_RUNNING] = "RUNNING",
114                 [MRPC_DONE] = "DONE",
115         };
116
117         stuser->state = state;
118
119         dev_dbg(&stuser->stdev->dev, "stuser state %p -> %s",
120                 stuser, state_names[state]);
121 }
122
123 static void mrpc_complete_cmd(struct switchtec_dev *stdev);
124
/*
 * Flush posted writes out of any write-combining buffer by performing
 * a read from the device before triggering command execution.
 */
static void flush_wc_buf(struct switchtec_dev *stdev)
{
	struct ntb_dbmsg_regs __iomem *mmio_dbmsg;

	/*
	 * odb (outbound doorbell) register is processed by low latency
	 * hardware and w/o side effect
	 */
	mmio_dbmsg = (void __iomem *)stdev->mmio_ntb +
		SWITCHTEC_NTB_REG_DBMSG_OFFSET;
	ioread32(&mmio_dbmsg->odb);
}
137
/*
 * Submit the command at the head of the MRPC queue to the hardware.
 * No-op if a command is already in flight or nothing is queued.
 */
static void mrpc_cmd_submit(struct switchtec_dev *stdev)
{
	/* requires the mrpc_mutex to already be held when called */

	struct switchtec_user *stuser;

	if (stdev->mrpc_busy)
		return;

	if (list_empty(&stdev->mrpc_queue))
		return;

	stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
			    list);

	if (stdev->dma_mrpc) {
		/*
		 * Pre-fill the DMA status/output area so stale data from a
		 * previous command cannot be mistaken for a fresh result.
		 */
		stdev->dma_mrpc->status = SWITCHTEC_MRPC_STATUS_INPROGRESS;
		memset(stdev->dma_mrpc->data, 0xFF, SWITCHTEC_MRPC_PAYLOAD_SIZE);
	}

	stuser_set_state(stuser, MRPC_RUNNING);
	stdev->mrpc_busy = 1;
	memcpy_toio(&stdev->mmio_mrpc->input_data,
		    stuser->data, stuser->data_len);
	/*
	 * Ensure the input payload has reached the device before the
	 * command register write triggers execution.
	 */
	flush_wc_buf(stdev);
	iowrite32(stuser->cmd, &stdev->mmio_mrpc->cmd);

	/* poll for completion in case the interrupt is never delivered */
	schedule_delayed_work(&stdev->mrpc_timeout,
			      msecs_to_jiffies(500));
}
168
169 static int mrpc_queue_cmd(struct switchtec_user *stuser)
170 {
171         /* requires the mrpc_mutex to already be held when called */
172
173         struct switchtec_dev *stdev = stuser->stdev;
174
175         kref_get(&stuser->kref);
176         stuser->read_len = sizeof(stuser->data);
177         stuser_set_state(stuser, MRPC_QUEUED);
178         init_completion(&stuser->comp);
179         list_add_tail(&stuser->list, &stdev->mrpc_queue);
180
181         mrpc_cmd_submit(stdev);
182
183         return 0;
184 }
185
/*
 * Reap the command at the head of the MRPC queue once the hardware
 * reports it is no longer in progress: cache its status, return code
 * and output data in the user context, wake any waiters, and submit
 * the next queued command.
 *
 * With DMA MRPC enabled, results are read from the DMA buffer;
 * otherwise from the MMIO output registers.
 */
static void mrpc_complete_cmd(struct switchtec_dev *stdev)
{
	/* requires the mrpc_mutex to already be held when called */
	struct switchtec_user *stuser;

	if (list_empty(&stdev->mrpc_queue))
		return;

	stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
			    list);

	if (stdev->dma_mrpc)
		stuser->status = stdev->dma_mrpc->status;
	else
		stuser->status = ioread32(&stdev->mmio_mrpc->status);

	/* still running: leave it queued; the timeout work re-polls */
	if (stuser->status == SWITCHTEC_MRPC_STATUS_INPROGRESS)
		return;

	stuser_set_state(stuser, MRPC_DONE);
	stuser->return_code = 0;

	/* only fetch return code and data for a successfully run command */
	if (stuser->status != SWITCHTEC_MRPC_STATUS_DONE)
		goto out;

	if (stdev->dma_mrpc)
		stuser->return_code = stdev->dma_mrpc->rtn_code;
	else
		stuser->return_code = ioread32(&stdev->mmio_mrpc->ret_value);
	if (stuser->return_code != 0)
		goto out;

	if (stdev->dma_mrpc)
		memcpy(stuser->data, &stdev->dma_mrpc->data,
			      stuser->read_len);
	else
		memcpy_fromio(stuser->data, &stdev->mmio_mrpc->output_data,
			      stuser->read_len);
out:
	complete_all(&stuser->comp);
	list_del_init(&stuser->list);
	/* drops the reference taken by mrpc_queue_cmd() */
	stuser_put(stuser);
	stdev->mrpc_busy = 0;

	mrpc_cmd_submit(stdev);
}
232
233 static void mrpc_event_work(struct work_struct *work)
234 {
235         struct switchtec_dev *stdev;
236
237         stdev = container_of(work, struct switchtec_dev, mrpc_work);
238
239         dev_dbg(&stdev->dev, "%s\n", __func__);
240
241         mutex_lock(&stdev->mrpc_mutex);
242         cancel_delayed_work(&stdev->mrpc_timeout);
243         mrpc_complete_cmd(stdev);
244         mutex_unlock(&stdev->mrpc_mutex);
245 }
246
247 static void mrpc_timeout_work(struct work_struct *work)
248 {
249         struct switchtec_dev *stdev;
250         u32 status;
251
252         stdev = container_of(work, struct switchtec_dev, mrpc_timeout.work);
253
254         dev_dbg(&stdev->dev, "%s\n", __func__);
255
256         mutex_lock(&stdev->mrpc_mutex);
257
258         if (stdev->dma_mrpc)
259                 status = stdev->dma_mrpc->status;
260         else
261                 status = ioread32(&stdev->mmio_mrpc->status);
262         if (status == SWITCHTEC_MRPC_STATUS_INPROGRESS) {
263                 schedule_delayed_work(&stdev->mrpc_timeout,
264                                       msecs_to_jiffies(500));
265                 goto out;
266         }
267
268         mrpc_complete_cmd(stdev);
269 out:
270         mutex_unlock(&stdev->mrpc_mutex);
271 }
272
273 static ssize_t device_version_show(struct device *dev,
274         struct device_attribute *attr, char *buf)
275 {
276         struct switchtec_dev *stdev = to_stdev(dev);
277         u32 ver;
278
279         ver = ioread32(&stdev->mmio_sys_info->device_version);
280
281         return sprintf(buf, "%x\n", ver);
282 }
283 static DEVICE_ATTR_RO(device_version);
284
285 static ssize_t fw_version_show(struct device *dev,
286         struct device_attribute *attr, char *buf)
287 {
288         struct switchtec_dev *stdev = to_stdev(dev);
289         u32 ver;
290
291         ver = ioread32(&stdev->mmio_sys_info->firmware_version);
292
293         return sprintf(buf, "%08x\n", ver);
294 }
295 static DEVICE_ATTR_RO(fw_version);
296
297 static ssize_t io_string_show(char *buf, void __iomem *attr, size_t len)
298 {
299         int i;
300
301         memcpy_fromio(buf, attr, len);
302         buf[len] = '\n';
303         buf[len + 1] = 0;
304
305         for (i = len - 1; i > 0; i--) {
306                 if (buf[i] != ' ')
307                         break;
308                 buf[i] = '\n';
309                 buf[i + 1] = 0;
310         }
311
312         return strlen(buf);
313 }
314
/*
 * Generate a read-only sysfs attribute for a fixed-width string field
 * in the Gen3 sys_info register block.  Returns -ENOTSUPP on other
 * hardware generations, which do not have these fields.
 */
#define DEVICE_ATTR_SYS_INFO_STR(field) \
static ssize_t field ## _show(struct device *dev, \
	struct device_attribute *attr, char *buf) \
{ \
	struct switchtec_dev *stdev = to_stdev(dev); \
	struct sys_info_regs __iomem *si = stdev->mmio_sys_info; \
	if (stdev->gen == SWITCHTEC_GEN3) \
		return io_string_show(buf, &si->gen3.field, \
				      sizeof(si->gen3.field)); \
	else \
		return -ENOTSUPP; \
} \
\
static DEVICE_ATTR_RO(field)

DEVICE_ATTR_SYS_INFO_STR(vendor_id);
DEVICE_ATTR_SYS_INFO_STR(product_id);
DEVICE_ATTR_SYS_INFO_STR(product_revision);
333
334 static ssize_t component_vendor_show(struct device *dev,
335                                      struct device_attribute *attr, char *buf)
336 {
337         struct switchtec_dev *stdev = to_stdev(dev);
338         struct sys_info_regs __iomem *si = stdev->mmio_sys_info;
339
340         /* component_vendor field not supported after gen3 */
341         if (stdev->gen != SWITCHTEC_GEN3)
342                 return sprintf(buf, "none\n");
343
344         return io_string_show(buf, &si->gen3.component_vendor,
345                               sizeof(si->gen3.component_vendor));
346 }
347 static DEVICE_ATTR_RO(component_vendor);
348
349 static ssize_t component_id_show(struct device *dev,
350         struct device_attribute *attr, char *buf)
351 {
352         struct switchtec_dev *stdev = to_stdev(dev);
353         int id = ioread16(&stdev->mmio_sys_info->gen3.component_id);
354
355         /* component_id field not supported after gen3 */
356         if (stdev->gen != SWITCHTEC_GEN3)
357                 return sprintf(buf, "none\n");
358
359         return sprintf(buf, "PM%04X\n", id);
360 }
361 static DEVICE_ATTR_RO(component_id);
362
363 static ssize_t component_revision_show(struct device *dev,
364         struct device_attribute *attr, char *buf)
365 {
366         struct switchtec_dev *stdev = to_stdev(dev);
367         int rev = ioread8(&stdev->mmio_sys_info->gen3.component_revision);
368
369         /* component_revision field not supported after gen3 */
370         if (stdev->gen != SWITCHTEC_GEN3)
371                 return sprintf(buf, "255\n");
372
373         return sprintf(buf, "%d\n", rev);
374 }
375 static DEVICE_ATTR_RO(component_revision);
376
377 static ssize_t partition_show(struct device *dev,
378         struct device_attribute *attr, char *buf)
379 {
380         struct switchtec_dev *stdev = to_stdev(dev);
381
382         return sprintf(buf, "%d\n", stdev->partition);
383 }
384 static DEVICE_ATTR_RO(partition);
385
386 static ssize_t partition_count_show(struct device *dev,
387         struct device_attribute *attr, char *buf)
388 {
389         struct switchtec_dev *stdev = to_stdev(dev);
390
391         return sprintf(buf, "%d\n", stdev->partition_count);
392 }
393 static DEVICE_ATTR_RO(partition_count);
394
/* sysfs attributes attached to every switchtec device */
static struct attribute *switchtec_device_attrs[] = {
	&dev_attr_device_version.attr,
	&dev_attr_fw_version.attr,
	&dev_attr_vendor_id.attr,
	&dev_attr_product_id.attr,
	&dev_attr_product_revision.attr,
	&dev_attr_component_vendor.attr,
	&dev_attr_component_id.attr,
	&dev_attr_component_revision.attr,
	&dev_attr_partition.attr,
	&dev_attr_partition_count.attr,
	NULL,
};

ATTRIBUTE_GROUPS(switchtec_device);
410
411 static int switchtec_dev_open(struct inode *inode, struct file *filp)
412 {
413         struct switchtec_dev *stdev;
414         struct switchtec_user *stuser;
415
416         stdev = container_of(inode->i_cdev, struct switchtec_dev, cdev);
417
418         stuser = stuser_create(stdev);
419         if (IS_ERR(stuser))
420                 return PTR_ERR(stuser);
421
422         filp->private_data = stuser;
423         stream_open(inode, filp);
424
425         dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);
426
427         return 0;
428 }
429
/* release(): drop the file's reference on its user context */
static int switchtec_dev_release(struct inode *inode, struct file *filp)
{
	struct switchtec_user *stuser = filp->private_data;

	stuser_put(stuser);

	return 0;
}
438
439 static int lock_mutex_and_test_alive(struct switchtec_dev *stdev)
440 {
441         if (mutex_lock_interruptible(&stdev->mrpc_mutex))
442                 return -EINTR;
443
444         if (!stdev->alive) {
445                 mutex_unlock(&stdev->mrpc_mutex);
446                 return -ENODEV;
447         }
448
449         return 0;
450 }
451
/*
 * write() for /dev/switchtecN: accept a 32-bit MRPC command number
 * followed by up to SWITCHTEC_MRPC_PAYLOAD_SIZE bytes of input data
 * and queue the command for execution.
 *
 * Only one command may be outstanding per open file; writing again
 * before reading the result returns -EBADE.  Returns @size on success.
 */
static ssize_t switchtec_dev_write(struct file *filp, const char __user *data,
				   size_t size, loff_t *off)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int rc;

	/* must contain at least the command word, at most command + payload */
	if (size < sizeof(stuser->cmd) ||
	    size > sizeof(stuser->cmd) + sizeof(stuser->data))
		return -EINVAL;

	stuser->data_len = size - sizeof(stuser->cmd);

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	if (stuser->state != MRPC_IDLE) {
		rc = -EBADE;
		goto out;
	}

	rc = copy_from_user(&stuser->cmd, data, sizeof(stuser->cmd));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	data += sizeof(stuser->cmd);
	rc = copy_from_user(&stuser->data, data, size - sizeof(stuser->cmd));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	rc = mrpc_queue_cmd(stuser);

out:
	mutex_unlock(&stdev->mrpc_mutex);

	if (rc)
		return rc;

	return size;
}
497
498 static ssize_t switchtec_dev_read(struct file *filp, char __user *data,
499                                   size_t size, loff_t *off)
500 {
501         struct switchtec_user *stuser = filp->private_data;
502         struct switchtec_dev *stdev = stuser->stdev;
503         int rc;
504
505         if (size < sizeof(stuser->cmd) ||
506             size > sizeof(stuser->cmd) + sizeof(stuser->data))
507                 return -EINVAL;
508
509         rc = lock_mutex_and_test_alive(stdev);
510         if (rc)
511                 return rc;
512
513         if (stuser->state == MRPC_IDLE) {
514                 mutex_unlock(&stdev->mrpc_mutex);
515                 return -EBADE;
516         }
517
518         stuser->read_len = size - sizeof(stuser->return_code);
519
520         mutex_unlock(&stdev->mrpc_mutex);
521
522         if (filp->f_flags & O_NONBLOCK) {
523                 if (!try_wait_for_completion(&stuser->comp))
524                         return -EAGAIN;
525         } else {
526                 rc = wait_for_completion_interruptible(&stuser->comp);
527                 if (rc < 0)
528                         return rc;
529         }
530
531         rc = lock_mutex_and_test_alive(stdev);
532         if (rc)
533                 return rc;
534
535         if (stuser->state != MRPC_DONE) {
536                 mutex_unlock(&stdev->mrpc_mutex);
537                 return -EBADE;
538         }
539
540         rc = copy_to_user(data, &stuser->return_code,
541                           sizeof(stuser->return_code));
542         if (rc) {
543                 rc = -EFAULT;
544                 goto out;
545         }
546
547         data += sizeof(stuser->return_code);
548         rc = copy_to_user(data, &stuser->data,
549                           size - sizeof(stuser->return_code));
550         if (rc) {
551                 rc = -EFAULT;
552                 goto out;
553         }
554
555         stuser_set_state(stuser, MRPC_IDLE);
556
557 out:
558         mutex_unlock(&stdev->mrpc_mutex);
559
560         if (stuser->status == SWITCHTEC_MRPC_STATUS_DONE)
561                 return size;
562         else if (stuser->status == SWITCHTEC_MRPC_STATUS_INTERRUPTED)
563                 return -ENXIO;
564         else
565                 return -EBADMSG;
566 }
567
568 static __poll_t switchtec_dev_poll(struct file *filp, poll_table *wait)
569 {
570         struct switchtec_user *stuser = filp->private_data;
571         struct switchtec_dev *stdev = stuser->stdev;
572         __poll_t ret = 0;
573
574         poll_wait(filp, &stuser->comp.wait, wait);
575         poll_wait(filp, &stdev->event_wq, wait);
576
577         if (lock_mutex_and_test_alive(stdev))
578                 return EPOLLIN | EPOLLRDHUP | EPOLLOUT | EPOLLERR | EPOLLHUP;
579
580         mutex_unlock(&stdev->mrpc_mutex);
581
582         if (try_wait_for_completion(&stuser->comp))
583                 ret |= EPOLLIN | EPOLLRDNORM;
584
585         if (stuser->event_cnt != atomic_read(&stdev->event_cnt))
586                 ret |= EPOLLPRI | EPOLLRDBAND;
587
588         return ret;
589 }
590
591 static int ioctl_flash_info(struct switchtec_dev *stdev,
592                             struct switchtec_ioctl_flash_info __user *uinfo)
593 {
594         struct switchtec_ioctl_flash_info info = {0};
595         struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;
596
597         if (stdev->gen == SWITCHTEC_GEN3) {
598                 info.flash_length = ioread32(&fi->gen3.flash_length);
599                 info.num_partitions = SWITCHTEC_NUM_PARTITIONS_GEN3;
600         } else {
601                 return -ENOTSUPP;
602         }
603
604         if (copy_to_user(uinfo, &info, sizeof(info)))
605                 return -EFAULT;
606
607         return 0;
608 }
609
/* Copy a flash partition's address and length out of MMIO into @info */
static void set_fw_info_part(struct switchtec_ioctl_flash_part_info *info,
			     struct partition_info __iomem *pi)
{
	info->address = ioread32(&pi->address);
	info->length = ioread32(&pi->length);
}
616
/*
 * Fill in @info for one Gen3 flash partition: its address and length,
 * whether it is the active (boot-selected) image/config, and whether
 * it is the one currently running.
 */
static int flash_part_info_gen3(struct switchtec_dev *stdev,
		struct switchtec_ioctl_flash_part_info *info)
{
	struct flash_info_regs_gen3 __iomem *fi =
		&stdev->mmio_flash_info->gen3;
	struct sys_info_regs_gen3 __iomem *si = &stdev->mmio_sys_info->gen3;
	u32 active_addr = -1;	/* -1: partition type has no "active" notion */

	switch (info->flash_partition) {
	case SWITCHTEC_IOCTL_PART_CFG0:
		active_addr = ioread32(&fi->active_cfg);
		set_fw_info_part(info, &fi->cfg0);
		if (ioread16(&si->cfg_running) == SWITCHTEC_GEN3_CFG0_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_CFG1:
		active_addr = ioread32(&fi->active_cfg);
		set_fw_info_part(info, &fi->cfg1);
		if (ioread16(&si->cfg_running) == SWITCHTEC_GEN3_CFG1_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_IMG0:
		active_addr = ioread32(&fi->active_img);
		set_fw_info_part(info, &fi->img0);
		if (ioread16(&si->img_running) == SWITCHTEC_GEN3_IMG0_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_IMG1:
		active_addr = ioread32(&fi->active_img);
		set_fw_info_part(info, &fi->img1);
		if (ioread16(&si->img_running) == SWITCHTEC_GEN3_IMG1_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_NVLOG:
		set_fw_info_part(info, &fi->nvlog);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR0:
		set_fw_info_part(info, &fi->vendor[0]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR1:
		set_fw_info_part(info, &fi->vendor[1]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR2:
		set_fw_info_part(info, &fi->vendor[2]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR3:
		set_fw_info_part(info, &fi->vendor[3]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR4:
		set_fw_info_part(info, &fi->vendor[4]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR5:
		set_fw_info_part(info, &fi->vendor[5]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR6:
		set_fw_info_part(info, &fi->vendor[6]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR7:
		set_fw_info_part(info, &fi->vendor[7]);
		break;
	default:
		return -EINVAL;
	}

	/* a partition whose address matches the active register is active */
	if (info->address == active_addr)
		info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;

	return 0;
}
686
687 static int ioctl_flash_part_info(struct switchtec_dev *stdev,
688                 struct switchtec_ioctl_flash_part_info __user *uinfo)
689 {
690         int ret;
691         struct switchtec_ioctl_flash_part_info info = {0};
692
693         if (copy_from_user(&info, uinfo, sizeof(info)))
694                 return -EFAULT;
695
696         if (stdev->gen == SWITCHTEC_GEN3) {
697                 ret = flash_part_info_gen3(stdev, &info);
698                 if (ret)
699                         return ret;
700         } else {
701                 return -ENOTSUPP;
702         }
703
704         if (copy_to_user(uinfo, &info, sizeof(info)))
705                 return -EFAULT;
706
707         return 0;
708 }
709
/*
 * SWITCHTEC_IOCTL_EVENT_SUMMARY: copy the global, per-partition and
 * per-PFF event summary registers to userspace in one shot, then
 * snapshot the event counter so poll() stops reporting these events
 * as new.
 *
 * NOTE(review): @size is used unchecked for the copy_to_user(); it is
 * presumably bounded by the ioctl dispatcher (not visible here) —
 * confirm against the caller.
 */
static int ioctl_event_summary(struct switchtec_dev *stdev,
	struct switchtec_user *stuser,
	struct switchtec_ioctl_event_summary __user *usum,
	size_t size)
{
	struct switchtec_ioctl_event_summary *s;
	int i;
	u32 reg;
	int ret = 0;

	/* the summary structure is large, so allocate it from the heap */
	s = kzalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	s->global = ioread32(&stdev->mmio_sw_event->global_summary);
	s->part_bitmap = ioread64(&stdev->mmio_sw_event->part_event_bitmap);
	s->local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary);

	for (i = 0; i < stdev->partition_count; i++) {
		reg = ioread32(&stdev->mmio_part_cfg_all[i].part_event_summary);
		s->part[i] = reg;
	}

	for (i = 0; i < stdev->pff_csr_count; i++) {
		reg = ioread32(&stdev->mmio_pff_csr[i].pff_event_summary);
		s->pff[i] = reg;
	}

	if (copy_to_user(usum, s, size)) {
		ret = -EFAULT;
		goto error_case;
	}

	/* mark all events seen as of this summary for poll() */
	stuser->event_cnt = atomic_read(&stdev->event_cnt);

error_case:
	kfree(s);
	return ret;
}
749
/* Locate the global (switch-wide) event register block; @index unused */
static u32 __iomem *global_ev_reg(struct switchtec_dev *stdev,
				  size_t offset, int index)
{
	return (void __iomem *)stdev->mmio_sw_event + offset;
}

/* Locate the event register block for partition @index */
static u32 __iomem *part_ev_reg(struct switchtec_dev *stdev,
				size_t offset, int index)
{
	return (void __iomem *)&stdev->mmio_part_cfg_all[index] + offset;
}

/* Locate the event register block for PFF (port function) @index */
static u32 __iomem *pff_ev_reg(struct switchtec_dev *stdev,
			       size_t offset, int index)
{
	return (void __iomem *)&stdev->mmio_pff_csr[index] + offset;
}

/* Initializers for event_regs[]: {header offset, block-lookup helper} */
#define EV_GLB(i, r)[i] = {offsetof(struct sw_event_regs, r), global_ev_reg}
#define EV_PAR(i, r)[i] = {offsetof(struct part_cfg_regs, r), part_ev_reg}
#define EV_PFF(i, r)[i] = {offsetof(struct pff_csr_regs, r), pff_ev_reg}
771
/*
 * Table mapping ioctl event ids to the offset of their header
 * register and a helper that locates the containing register block
 * (global, per-partition, or per-PFF).
 */
static const struct event_reg {
	size_t offset;		/* offset of the event header within its block */
	u32 __iomem *(*map_reg)(struct switchtec_dev *stdev,
				size_t offset, int index);
} event_regs[] = {
	EV_GLB(SWITCHTEC_IOCTL_EVENT_STACK_ERROR, stack_error_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_PPU_ERROR, ppu_error_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_ISP_ERROR, isp_error_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_SYS_RESET, sys_reset_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_EXC, fw_exception_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NMI, fw_nmi_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NON_FATAL, fw_non_fatal_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_FATAL, fw_fatal_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP, twi_mrpc_comp_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP_ASYNC,
	       twi_mrpc_comp_async_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP, cli_mrpc_comp_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP_ASYNC,
	       cli_mrpc_comp_async_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_GPIO_INT, gpio_interrupt_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_GFMS, gfms_event_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_PART_RESET, part_reset_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP, mrpc_comp_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP_ASYNC, mrpc_comp_async_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_DYN_PART_BIND_COMP, dyn_binding_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_INTERCOMM_REQ_NOTIFY,
	       intercomm_notify_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_P2P, aer_in_p2p_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_VEP, aer_in_vep_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_DPC, dpc_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_CTS, cts_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_UEC, uec_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_HOTPLUG, hotplug_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_IER, ier_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_THRESH, threshold_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_POWER_MGMT, power_mgmt_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_TLP_THROTTLING, tlp_throttling_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_FORCE_SPEED, force_speed_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_CREDIT_TIMEOUT, credit_timeout_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_LINK_STATE, link_state_hdr),
};
813
814 static u32 __iomem *event_hdr_addr(struct switchtec_dev *stdev,
815                                    int event_id, int index)
816 {
817         size_t off;
818
819         if (event_id < 0 || event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
820                 return ERR_PTR(-EINVAL);
821
822         off = event_regs[event_id].offset;
823
824         if (event_regs[event_id].map_reg == part_ev_reg) {
825                 if (index == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
826                         index = stdev->partition;
827                 else if (index < 0 || index >= stdev->partition_count)
828                         return ERR_PTR(-EINVAL);
829         } else if (event_regs[event_id].map_reg == pff_ev_reg) {
830                 if (index < 0 || index >= stdev->pff_csr_count)
831                         return ERR_PTR(-EINVAL);
832         }
833
834         return event_regs[event_id].map_reg(stdev, off, index);
835 }
836
/*
 * Read one event's header and data registers into @ctl and apply any
 * requested flag changes (clear; enable/disable IRQ, log, CLI
 * notification, fatal).  On return, ctl->flags is rewritten to
 * reflect the event's resulting enable state.
 */
static int event_ctl(struct switchtec_dev *stdev,
		     struct switchtec_ioctl_event_ctl *ctl)
{
	int i;
	u32 __iomem *reg;
	u32 hdr;

	reg = event_hdr_addr(stdev, ctl->event_id, ctl->index);
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	/* data words follow the header register */
	hdr = ioread32(reg);
	for (i = 0; i < ARRAY_SIZE(ctl->data); i++)
		ctl->data[i] = ioread32(&reg[i + 1]);

	ctl->occurred = hdr & SWITCHTEC_EVENT_OCCURRED;
	ctl->count = (hdr >> 5) & 0xFF;

	/* writing the header back with CLEAR set acknowledges the event */
	if (!(ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_CLEAR))
		hdr &= ~SWITCHTEC_EVENT_CLEAR;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL)
		hdr |= SWITCHTEC_EVENT_EN_IRQ;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_POLL)
		hdr &= ~SWITCHTEC_EVENT_EN_IRQ;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG)
		hdr |= SWITCHTEC_EVENT_EN_LOG;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_LOG)
		hdr &= ~SWITCHTEC_EVENT_EN_LOG;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI)
		hdr |= SWITCHTEC_EVENT_EN_CLI;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_CLI)
		hdr &= ~SWITCHTEC_EVENT_EN_CLI;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL)
		hdr |= SWITCHTEC_EVENT_FATAL;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_FATAL)
		hdr &= ~SWITCHTEC_EVENT_FATAL;

	/* only write back if the caller actually requested a change */
	if (ctl->flags)
		iowrite32(hdr, reg);

	ctl->flags = 0;
	if (hdr & SWITCHTEC_EVENT_EN_IRQ)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL;
	if (hdr & SWITCHTEC_EVENT_EN_LOG)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG;
	if (hdr & SWITCHTEC_EVENT_EN_CLI)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI;
	if (hdr & SWITCHTEC_EVENT_FATAL)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL;

	return 0;
}
889
/*
 * SWITCHTEC_IOCTL_EVENT_CTL: control a single event instance, or —
 * when index is SWITCHTEC_IOCTL_EVENT_IDX_ALL — apply the same flags
 * to every instance of the event (each partition or each PFF).  The
 * ctl copied back reflects the last instance processed.
 */
static int ioctl_event_ctl(struct switchtec_dev *stdev,
	struct switchtec_ioctl_event_ctl __user *uctl)
{
	int ret;
	int nr_idxs;
	unsigned int event_flags;
	struct switchtec_ioctl_event_ctl ctl;

	if (copy_from_user(&ctl, uctl, sizeof(ctl)))
		return -EFAULT;

	if (ctl.event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
		return -EINVAL;

	if (ctl.flags & SWITCHTEC_IOCTL_EVENT_FLAG_UNUSED)
		return -EINVAL;

	if (ctl.index == SWITCHTEC_IOCTL_EVENT_IDX_ALL) {
		/* instance count depends on which block the event lives in */
		if (event_regs[ctl.event_id].map_reg == global_ev_reg)
			nr_idxs = 1;
		else if (event_regs[ctl.event_id].map_reg == part_ev_reg)
			nr_idxs = stdev->partition_count;
		else if (event_regs[ctl.event_id].map_reg == pff_ev_reg)
			nr_idxs = stdev->pff_csr_count;
		else
			return -EINVAL;

		/* event_ctl() rewrites ctl.flags, so re-seed it each pass */
		event_flags = ctl.flags;
		for (ctl.index = 0; ctl.index < nr_idxs; ctl.index++) {
			ctl.flags = event_flags;
			ret = event_ctl(stdev, &ctl);
			if (ret < 0)
				return ret;
		}
	} else {
		ret = event_ctl(stdev, &ctl);
		if (ret < 0)
			return ret;
	}

	if (copy_to_user(uctl, &ctl, sizeof(ctl)))
		return -EFAULT;

	return 0;
}
935
936 static int ioctl_pff_to_port(struct switchtec_dev *stdev,
937                              struct switchtec_ioctl_pff_port *up)
938 {
939         int i, part;
940         u32 reg;
941         struct part_cfg_regs *pcfg;
942         struct switchtec_ioctl_pff_port p;
943
944         if (copy_from_user(&p, up, sizeof(p)))
945                 return -EFAULT;
946
947         p.port = -1;
948         for (part = 0; part < stdev->partition_count; part++) {
949                 pcfg = &stdev->mmio_part_cfg_all[part];
950                 p.partition = part;
951
952                 reg = ioread32(&pcfg->usp_pff_inst_id);
953                 if (reg == p.pff) {
954                         p.port = 0;
955                         break;
956                 }
957
958                 reg = ioread32(&pcfg->vep_pff_inst_id);
959                 if (reg == p.pff) {
960                         p.port = SWITCHTEC_IOCTL_PFF_VEP;
961                         break;
962                 }
963
964                 for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
965                         reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
966                         if (reg != p.pff)
967                                 continue;
968
969                         p.port = i + 1;
970                         break;
971                 }
972
973                 if (p.port != -1)
974                         break;
975         }
976
977         if (copy_to_user(up, &p, sizeof(p)))
978                 return -EFAULT;
979
980         return 0;
981 }
982
983 static int ioctl_port_to_pff(struct switchtec_dev *stdev,
984                              struct switchtec_ioctl_pff_port *up)
985 {
986         struct switchtec_ioctl_pff_port p;
987         struct part_cfg_regs *pcfg;
988
989         if (copy_from_user(&p, up, sizeof(p)))
990                 return -EFAULT;
991
992         if (p.partition == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
993                 pcfg = stdev->mmio_part_cfg;
994         else if (p.partition < stdev->partition_count)
995                 pcfg = &stdev->mmio_part_cfg_all[p.partition];
996         else
997                 return -EINVAL;
998
999         switch (p.port) {
1000         case 0:
1001                 p.pff = ioread32(&pcfg->usp_pff_inst_id);
1002                 break;
1003         case SWITCHTEC_IOCTL_PFF_VEP:
1004                 p.pff = ioread32(&pcfg->vep_pff_inst_id);
1005                 break;
1006         default:
1007                 if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id))
1008                         return -EINVAL;
1009                 p.port = array_index_nospec(p.port,
1010                                         ARRAY_SIZE(pcfg->dsp_pff_inst_id) + 1);
1011                 p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]);
1012                 break;
1013         }
1014
1015         if (copy_to_user(up, &p, sizeof(p)))
1016                 return -EFAULT;
1017
1018         return 0;
1019 }
1020
1021 static long switchtec_dev_ioctl(struct file *filp, unsigned int cmd,
1022                                 unsigned long arg)
1023 {
1024         struct switchtec_user *stuser = filp->private_data;
1025         struct switchtec_dev *stdev = stuser->stdev;
1026         int rc;
1027         void __user *argp = (void __user *)arg;
1028
1029         rc = lock_mutex_and_test_alive(stdev);
1030         if (rc)
1031                 return rc;
1032
1033         switch (cmd) {
1034         case SWITCHTEC_IOCTL_FLASH_INFO:
1035                 rc = ioctl_flash_info(stdev, argp);
1036                 break;
1037         case SWITCHTEC_IOCTL_FLASH_PART_INFO:
1038                 rc = ioctl_flash_part_info(stdev, argp);
1039                 break;
1040         case SWITCHTEC_IOCTL_EVENT_SUMMARY_LEGACY:
1041                 rc = ioctl_event_summary(stdev, stuser, argp,
1042                                          sizeof(struct switchtec_ioctl_event_summary_legacy));
1043                 break;
1044         case SWITCHTEC_IOCTL_EVENT_CTL:
1045                 rc = ioctl_event_ctl(stdev, argp);
1046                 break;
1047         case SWITCHTEC_IOCTL_PFF_TO_PORT:
1048                 rc = ioctl_pff_to_port(stdev, argp);
1049                 break;
1050         case SWITCHTEC_IOCTL_PORT_TO_PFF:
1051                 rc = ioctl_port_to_pff(stdev, argp);
1052                 break;
1053         case SWITCHTEC_IOCTL_EVENT_SUMMARY:
1054                 rc = ioctl_event_summary(stdev, stuser, argp,
1055                                          sizeof(struct switchtec_ioctl_event_summary));
1056                 break;
1057         default:
1058                 rc = -ENOTTY;
1059                 break;
1060         }
1061
1062         mutex_unlock(&stdev->mrpc_mutex);
1063         return rc;
1064 }
1065
/* File operations backing /dev/switchtec<N> (the management interface) */
static const struct file_operations switchtec_fops = {
	.owner = THIS_MODULE,
	.open = switchtec_dev_open,
	.release = switchtec_dev_release,
	.write = switchtec_dev_write,
	.read = switchtec_dev_read,
	.poll = switchtec_dev_poll,
	.unlocked_ioctl = switchtec_dev_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
};
1076
1077 static void link_event_work(struct work_struct *work)
1078 {
1079         struct switchtec_dev *stdev;
1080
1081         stdev = container_of(work, struct switchtec_dev, link_event_work);
1082
1083         if (stdev->link_notifier)
1084                 stdev->link_notifier(stdev);
1085 }
1086
1087 static void check_link_state_events(struct switchtec_dev *stdev)
1088 {
1089         int idx;
1090         u32 reg;
1091         int count;
1092         int occurred = 0;
1093
1094         for (idx = 0; idx < stdev->pff_csr_count; idx++) {
1095                 reg = ioread32(&stdev->mmio_pff_csr[idx].link_state_hdr);
1096                 dev_dbg(&stdev->dev, "link_state: %d->%08x\n", idx, reg);
1097                 count = (reg >> 5) & 0xFF;
1098
1099                 if (count != stdev->link_event_count[idx]) {
1100                         occurred = 1;
1101                         stdev->link_event_count[idx] = count;
1102                 }
1103         }
1104
1105         if (occurred)
1106                 schedule_work(&stdev->link_event_work);
1107 }
1108
1109 static void enable_link_state_events(struct switchtec_dev *stdev)
1110 {
1111         int idx;
1112
1113         for (idx = 0; idx < stdev->pff_csr_count; idx++) {
1114                 iowrite32(SWITCHTEC_EVENT_CLEAR |
1115                           SWITCHTEC_EVENT_EN_IRQ,
1116                           &stdev->mmio_pff_csr[idx].link_state_hdr);
1117         }
1118 }
1119
/*
 * Program the DMA address of the MRPC output buffer and then enable the
 * DMA MRPC feature.  The write-combining buffer is flushed between the
 * address write and the enable write so the hardware cannot observe the
 * enable bit before the full 64-bit address has reached it.
 */
static void enable_dma_mrpc(struct switchtec_dev *stdev)
{
	writeq(stdev->dma_mrpc_dma_addr, &stdev->mmio_mrpc->dma_addr);
	flush_wc_buf(stdev);
	iowrite32(SWITCHTEC_DMA_MRPC_EN, &stdev->mmio_mrpc->dma_en);
}
1126
/*
 * Device-model release callback, invoked when the last reference to
 * stdev->dev is dropped.  Disables DMA MRPC (if it was enabled) and
 * frees the coherent buffer and the device structure.
 */
static void stdev_release(struct device *dev)
{
	struct switchtec_dev *stdev = to_stdev(dev);

	if (stdev->dma_mrpc) {
		/*
		 * Disable the engine and clear the DMA address before
		 * freeing the buffer so the hardware cannot DMA into
		 * freed memory.
		 *
		 * NOTE(review): these are MMIO accesses performed from the
		 * release callback; if the final put_device() happens after
		 * a surprise hot-remove the BAR mapping may no longer be
		 * valid — verify against the removal path.
		 */
		iowrite32(0, &stdev->mmio_mrpc->dma_en);
		flush_wc_buf(stdev);
		writeq(0, &stdev->mmio_mrpc->dma_addr);
		dma_free_coherent(&stdev->pdev->dev, sizeof(*stdev->dma_mrpc),
				stdev->dma_mrpc, stdev->dma_mrpc_dma_addr);
	}
	kfree(stdev);
}
1140
/*
 * Tear down a device that is going away: stop bus mastering, cancel the
 * MRPC timeout work, mark the device dead under the MRPC mutex, and
 * wake every waiter (both MRPC submitters and event pollers) so user
 * space can observe the failure.
 */
static void stdev_kill(struct switchtec_dev *stdev)
{
	struct switchtec_user *stuser, *tmpuser;

	pci_clear_master(stdev->pdev);

	cancel_delayed_work_sync(&stdev->mrpc_timeout);

	/* Mark the hardware as unavailable and complete all completions */
	mutex_lock(&stdev->mrpc_mutex);
	stdev->alive = false;

	/* Wake up and kill any users waiting on an MRPC request */
	list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) {
		complete_all(&stuser->comp);
		list_del_init(&stuser->list);
		stuser_put(stuser);	/* drop the queue's reference */
	}

	mutex_unlock(&stdev->mrpc_mutex);

	/* Wake up any users waiting on event_wq */
	wake_up_interruptible(&stdev->event_wq);
}
1165
/*
 * Allocate and initialize a switchtec_dev and its embedded struct
 * device / cdev.  The cdev is initialized but not yet added; the caller
 * is expected to call cdev_device_add() once the hardware is set up.
 *
 * Returns the new device or an ERR_PTR().  On failure the partially
 * initialized device is released via put_device() -> stdev_release().
 */
static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
{
	struct switchtec_dev *stdev;
	int minor;
	struct device *dev;
	struct cdev *cdev;
	int rc;

	/* allocate on the PCI device's NUMA node */
	stdev = kzalloc_node(sizeof(*stdev), GFP_KERNEL,
			     dev_to_node(&pdev->dev));
	if (!stdev)
		return ERR_PTR(-ENOMEM);

	stdev->alive = true;
	stdev->pdev = pdev;
	INIT_LIST_HEAD(&stdev->mrpc_queue);
	mutex_init(&stdev->mrpc_mutex);
	stdev->mrpc_busy = 0;
	INIT_WORK(&stdev->mrpc_work, mrpc_event_work);
	INIT_DELAYED_WORK(&stdev->mrpc_timeout, mrpc_timeout_work);
	INIT_WORK(&stdev->link_event_work, link_event_work);
	init_waitqueue_head(&stdev->event_wq);
	atomic_set(&stdev->event_cnt, 0);

	dev = &stdev->dev;
	device_initialize(dev);
	dev->class = switchtec_class;
	dev->parent = &pdev->dev;
	dev->groups = switchtec_device_groups;
	dev->release = stdev_release;

	/* reserve a minor number for /dev/switchtec<minor> */
	minor = ida_simple_get(&switchtec_minor_ida, 0, 0,
			       GFP_KERNEL);
	if (minor < 0) {
		rc = minor;
		goto err_put;
	}

	dev->devt = MKDEV(MAJOR(switchtec_devt), minor);
	dev_set_name(dev, "switchtec%d", minor);

	cdev = &stdev->cdev;
	cdev_init(cdev, &switchtec_fops);
	cdev->owner = THIS_MODULE;

	return stdev;

err_put:
	put_device(&stdev->dev);
	return ERR_PTR(rc);
}
1217
1218 static int mask_event(struct switchtec_dev *stdev, int eid, int idx)
1219 {
1220         size_t off = event_regs[eid].offset;
1221         u32 __iomem *hdr_reg;
1222         u32 hdr;
1223
1224         hdr_reg = event_regs[eid].map_reg(stdev, off, idx);
1225         hdr = ioread32(hdr_reg);
1226
1227         if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ))
1228                 return 0;
1229
1230         dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr);
1231         hdr &= ~(SWITCHTEC_EVENT_EN_IRQ | SWITCHTEC_EVENT_OCCURRED);
1232         iowrite32(hdr, hdr_reg);
1233
1234         return 1;
1235 }
1236
1237 static int mask_all_events(struct switchtec_dev *stdev, int eid)
1238 {
1239         int idx;
1240         int count = 0;
1241
1242         if (event_regs[eid].map_reg == part_ev_reg) {
1243                 for (idx = 0; idx < stdev->partition_count; idx++)
1244                         count += mask_event(stdev, eid, idx);
1245         } else if (event_regs[eid].map_reg == pff_ev_reg) {
1246                 for (idx = 0; idx < stdev->pff_csr_count; idx++) {
1247                         if (!stdev->pff_local[idx])
1248                                 continue;
1249
1250                         count += mask_event(stdev, eid, idx);
1251                 }
1252         } else {
1253                 count += mask_event(stdev, eid, 0);
1254         }
1255
1256         return count;
1257 }
1258
/*
 * Event interrupt handler.  Acknowledges an MRPC completion (deferring
 * the real work to mrpc_work), checks for link state changes, then
 * masks every other event that fired so it stops interrupting until
 * user space re-enables it through the event ioctls.
 */
static irqreturn_t switchtec_event_isr(int irq, void *dev)
{
	struct switchtec_dev *stdev = dev;
	u32 reg;
	irqreturn_t ret = IRQ_NONE;
	int eid, event_count = 0;

	reg = ioread32(&stdev->mmio_part_cfg->mrpc_comp_hdr);
	if (reg & SWITCHTEC_EVENT_OCCURRED) {
		dev_dbg(&stdev->dev, "%s: mrpc comp\n", __func__);
		ret = IRQ_HANDLED;
		schedule_work(&stdev->mrpc_work);
		/* write the header back to acknowledge the event */
		iowrite32(reg, &stdev->mmio_part_cfg->mrpc_comp_hdr);
	}

	check_link_state_events(stdev);

	for (eid = 0; eid < SWITCHTEC_IOCTL_MAX_EVENTS; eid++) {
		/* link state and MRPC completion are handled separately above */
		if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE ||
		    eid == SWITCHTEC_IOCTL_EVENT_MRPC_COMP)
			continue;

		event_count += mask_all_events(stdev, eid);
	}

	if (event_count) {
		/* let poll()/read() waiters know new events are pending */
		atomic_inc(&stdev->event_cnt);
		wake_up_interruptible(&stdev->event_wq);
		dev_dbg(&stdev->dev, "%s: %d events\n", __func__,
			event_count);
		return IRQ_HANDLED;
	}

	return ret;
}
1294
1295
1296 static irqreturn_t switchtec_dma_mrpc_isr(int irq, void *dev)
1297 {
1298         struct switchtec_dev *stdev = dev;
1299         irqreturn_t ret = IRQ_NONE;
1300
1301         iowrite32(SWITCHTEC_EVENT_CLEAR |
1302                   SWITCHTEC_EVENT_EN_IRQ,
1303                   &stdev->mmio_part_cfg->mrpc_comp_hdr);
1304         schedule_work(&stdev->mrpc_work);
1305
1306         ret = IRQ_HANDLED;
1307         return ret;
1308 }
1309
/*
 * Allocate interrupt vectors and request the event interrupt (and the
 * DMA MRPC interrupt when that feature is active).  The hardware tells
 * us which vector it raises each interrupt on via the vep_vector_number
 * and dma_vector registers.
 *
 * Returns 0 on success or a negative errno.
 */
static int switchtec_init_isr(struct switchtec_dev *stdev)
{
	int nvecs;
	int event_irq;
	int dma_mrpc_irq;
	int rc;

	/* enforce a floor on the nirqs module parameter */
	if (nirqs < 4)
		nirqs = 4;

	nvecs = pci_alloc_irq_vectors(stdev->pdev, 1, nirqs,
				      PCI_IRQ_MSIX | PCI_IRQ_MSI |
				      PCI_IRQ_VIRTUAL);
	if (nvecs < 0)
		return nvecs;

	/* sanity-check the vector number the hardware reported */
	event_irq = ioread16(&stdev->mmio_part_cfg->vep_vector_number);
	if (event_irq < 0 || event_irq >= nvecs)
		return -EFAULT;

	event_irq = pci_irq_vector(stdev->pdev, event_irq);
	if (event_irq < 0)
		return event_irq;

	rc = devm_request_irq(&stdev->pdev->dev, event_irq,
				switchtec_event_isr, 0,
				KBUILD_MODNAME, stdev);

	if (rc)
		return rc;

	/* done unless the DMA MRPC buffer was allocated at init time */
	if (!stdev->dma_mrpc)
		return rc;

	dma_mrpc_irq = ioread32(&stdev->mmio_mrpc->dma_vector);
	if (dma_mrpc_irq < 0 || dma_mrpc_irq >= nvecs)
		return -EFAULT;

	dma_mrpc_irq  = pci_irq_vector(stdev->pdev, dma_mrpc_irq);
	if (dma_mrpc_irq < 0)
		return dma_mrpc_irq;

	rc = devm_request_irq(&stdev->pdev->dev, dma_mrpc_irq,
				switchtec_dma_mrpc_isr, 0,
				KBUILD_MODNAME, stdev);

	return rc;
}
1358
1359 static void init_pff(struct switchtec_dev *stdev)
1360 {
1361         int i;
1362         u32 reg;
1363         struct part_cfg_regs *pcfg = stdev->mmio_part_cfg;
1364
1365         for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
1366                 reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
1367                 if (reg != PCI_VENDOR_ID_MICROSEMI)
1368                         break;
1369         }
1370
1371         stdev->pff_csr_count = i;
1372
1373         reg = ioread32(&pcfg->usp_pff_inst_id);
1374         if (reg < stdev->pff_csr_count)
1375                 stdev->pff_local[reg] = 1;
1376
1377         reg = ioread32(&pcfg->vep_pff_inst_id);
1378         if (reg < stdev->pff_csr_count)
1379                 stdev->pff_local[reg] = 1;
1380
1381         for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
1382                 reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
1383                 if (reg < stdev->pff_csr_count)
1384                         stdev->pff_local[reg] = 1;
1385         }
1386 }
1387
/*
 * Enable the PCI device, map BAR 0 (the GAS) and discover the basic
 * topology (partition id/count, PFF CSRs).  Also allocates the DMA
 * MRPC buffer when the feature is enabled and supported by hardware.
 *
 * Returns 0 on success or a negative errno.
 */
static int switchtec_init_pci(struct switchtec_dev *stdev,
			      struct pci_dev *pdev)
{
	int rc;
	void __iomem *map;
	unsigned long res_start, res_len;
	u32 __iomem *part_id;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc)
		return rc;

	pci_set_master(pdev);

	res_start = pci_resource_start(pdev, 0);
	res_len = pci_resource_len(pdev, 0);

	if (!devm_request_mem_region(&pdev->dev, res_start,
				     res_len, KBUILD_MODNAME))
		return -EBUSY;

	/*
	 * The MRPC region at the bottom of the BAR is mapped
	 * write-combining; the remainder of the GAS is mapped with
	 * normal uncached I/O attributes.
	 */
	stdev->mmio_mrpc = devm_ioremap_wc(&pdev->dev, res_start,
					   SWITCHTEC_GAS_TOP_CFG_OFFSET);
	if (!stdev->mmio_mrpc)
		return -ENOMEM;

	map = devm_ioremap(&pdev->dev,
			   res_start + SWITCHTEC_GAS_TOP_CFG_OFFSET,
			   res_len - SWITCHTEC_GAS_TOP_CFG_OFFSET);
	if (!map)
		return -ENOMEM;

	/*
	 * mmio is biased back so GAS offsets can be added directly; the
	 * first SWITCHTEC_GAS_TOP_CFG_OFFSET bytes lie outside this
	 * mapping and must be accessed through mmio_mrpc instead.
	 */
	stdev->mmio = map - SWITCHTEC_GAS_TOP_CFG_OFFSET;
	stdev->mmio_sw_event = stdev->mmio + SWITCHTEC_GAS_SW_EVENT_OFFSET;
	stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET;
	stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET;
	stdev->mmio_ntb = stdev->mmio + SWITCHTEC_GAS_NTB_OFFSET;

	/* only the Gen3 register layout is supported here */
	if (stdev->gen == SWITCHTEC_GEN3)
		part_id = &stdev->mmio_sys_info->gen3.partition_id;
	else
		return -ENOTSUPP;

	stdev->partition = ioread8(part_id);
	stdev->partition_count = ioread8(&stdev->mmio_ntb->partition_count);
	stdev->mmio_part_cfg_all = stdev->mmio + SWITCHTEC_GAS_PART_CFG_OFFSET;
	stdev->mmio_part_cfg = &stdev->mmio_part_cfg_all[stdev->partition];
	stdev->mmio_pff_csr = stdev->mmio + SWITCHTEC_GAS_PFF_CSR_OFFSET;

	if (stdev->partition_count < 1)
		stdev->partition_count = 1;

	init_pff(stdev);

	pci_set_drvdata(pdev, stdev);

	if (!use_dma_mrpc)
		return 0;

	/* hardware without DMA MRPC support reports a zero version */
	if (ioread32(&stdev->mmio_mrpc->dma_ver) == 0)
		return 0;

	stdev->dma_mrpc = dma_alloc_coherent(&stdev->pdev->dev,
					     sizeof(*stdev->dma_mrpc),
					     &stdev->dma_mrpc_dma_addr,
					     GFP_KERNEL);
	if (stdev->dma_mrpc == NULL)
		return -ENOMEM;

	return 0;
}
1463
/*
 * PCI probe: create the switchtec device, initialize the hardware and
 * interrupts, enable event reporting, and finally expose the character
 * device to user space.
 */
static int switchtec_pci_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	struct switchtec_dev *stdev;
	int rc;

	/* the bridge-class instance is the NTB endpoint; pull in the NTB
	 * driver for it (best effort, asynchronous) */
	if (pdev->class == (PCI_CLASS_BRIDGE_OTHER << 8))
		request_module_nowait("ntb_hw_switchtec");

	stdev = stdev_create(pdev);
	if (IS_ERR(stdev))
		return PTR_ERR(stdev);

	/* hardware generation comes from the matched device-table entry */
	stdev->gen = id->driver_data;

	rc = switchtec_init_pci(stdev, pdev);
	if (rc)
		goto err_put;

	rc = switchtec_init_isr(stdev);
	if (rc) {
		dev_err(&stdev->dev, "failed to init isr.\n");
		goto err_put;
	}

	/* clear any stale MRPC completion and enable its interrupt */
	iowrite32(SWITCHTEC_EVENT_CLEAR |
		  SWITCHTEC_EVENT_EN_IRQ,
		  &stdev->mmio_part_cfg->mrpc_comp_hdr);
	enable_link_state_events(stdev);

	if (stdev->dma_mrpc)
		enable_dma_mrpc(stdev);

	/* last step: make the char device visible to user space */
	rc = cdev_device_add(&stdev->cdev, &stdev->dev);
	if (rc)
		goto err_devadd;

	dev_info(&stdev->dev, "Management device registered.\n");

	return 0;

err_devadd:
	stdev_kill(stdev);
err_put:
	ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
	put_device(&stdev->dev);
	return rc;
}
1512
/*
 * PCI remove: unregister the character device, release the minor
 * number, wake/kill all waiters and drop the probe's device reference
 * (the structure itself is freed by stdev_release()).
 */
static void switchtec_pci_remove(struct pci_dev *pdev)
{
	struct switchtec_dev *stdev = pci_get_drvdata(pdev);

	pci_set_drvdata(pdev, NULL);

	cdev_device_del(&stdev->cdev, &stdev->dev);
	ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
	dev_info(&stdev->dev, "unregistered.\n");
	stdev_kill(stdev);
	put_device(&stdev->dev);
}
1525
/*
 * Each switch part is matched twice: once with the memory class (the
 * management endpoint this driver binds for) and once with the bridge
 * class (the NTB endpoint, for which probe requests ntb_hw_switchtec).
 * The hardware generation is carried in driver_data.
 */
#define SWITCHTEC_PCI_DEVICE(device_id, gen) \
	{ \
		.vendor     = PCI_VENDOR_ID_MICROSEMI, \
		.device     = device_id, \
		.subvendor  = PCI_ANY_ID, \
		.subdevice  = PCI_ANY_ID, \
		.class      = (PCI_CLASS_MEMORY_OTHER << 8), \
		.class_mask = 0xFFFFFFFF, \
		.driver_data = gen, \
	}, \
	{ \
		.vendor     = PCI_VENDOR_ID_MICROSEMI, \
		.device     = device_id, \
		.subvendor  = PCI_ANY_ID, \
		.subdevice  = PCI_ANY_ID, \
		.class      = (PCI_CLASS_BRIDGE_OTHER << 8), \
		.class_mask = 0xFFFFFFFF, \
		.driver_data = gen, \
	}

static const struct pci_device_id switchtec_pci_tbl[] = {
	SWITCHTEC_PCI_DEVICE(0x8531, SWITCHTEC_GEN3),  //PFX 24xG3
	SWITCHTEC_PCI_DEVICE(0x8532, SWITCHTEC_GEN3),  //PFX 32xG3
	SWITCHTEC_PCI_DEVICE(0x8533, SWITCHTEC_GEN3),  //PFX 48xG3
	SWITCHTEC_PCI_DEVICE(0x8534, SWITCHTEC_GEN3),  //PFX 64xG3
	SWITCHTEC_PCI_DEVICE(0x8535, SWITCHTEC_GEN3),  //PFX 80xG3
	SWITCHTEC_PCI_DEVICE(0x8536, SWITCHTEC_GEN3),  //PFX 96xG3
	SWITCHTEC_PCI_DEVICE(0x8541, SWITCHTEC_GEN3),  //PSX 24xG3
	SWITCHTEC_PCI_DEVICE(0x8542, SWITCHTEC_GEN3),  //PSX 32xG3
	SWITCHTEC_PCI_DEVICE(0x8543, SWITCHTEC_GEN3),  //PSX 48xG3
	SWITCHTEC_PCI_DEVICE(0x8544, SWITCHTEC_GEN3),  //PSX 64xG3
	SWITCHTEC_PCI_DEVICE(0x8545, SWITCHTEC_GEN3),  //PSX 80xG3
	SWITCHTEC_PCI_DEVICE(0x8546, SWITCHTEC_GEN3),  //PSX 96xG3
	SWITCHTEC_PCI_DEVICE(0x8551, SWITCHTEC_GEN3),  //PAX 24XG3
	SWITCHTEC_PCI_DEVICE(0x8552, SWITCHTEC_GEN3),  //PAX 32XG3
	SWITCHTEC_PCI_DEVICE(0x8553, SWITCHTEC_GEN3),  //PAX 48XG3
	SWITCHTEC_PCI_DEVICE(0x8554, SWITCHTEC_GEN3),  //PAX 64XG3
	SWITCHTEC_PCI_DEVICE(0x8555, SWITCHTEC_GEN3),  //PAX 80XG3
	SWITCHTEC_PCI_DEVICE(0x8556, SWITCHTEC_GEN3),  //PAX 96XG3
	SWITCHTEC_PCI_DEVICE(0x8561, SWITCHTEC_GEN3),  //PFXL 24XG3
	SWITCHTEC_PCI_DEVICE(0x8562, SWITCHTEC_GEN3),  //PFXL 32XG3
	SWITCHTEC_PCI_DEVICE(0x8563, SWITCHTEC_GEN3),  //PFXL 48XG3
	SWITCHTEC_PCI_DEVICE(0x8564, SWITCHTEC_GEN3),  //PFXL 64XG3
	SWITCHTEC_PCI_DEVICE(0x8565, SWITCHTEC_GEN3),  //PFXL 80XG3
	SWITCHTEC_PCI_DEVICE(0x8566, SWITCHTEC_GEN3),  //PFXL 96XG3
	SWITCHTEC_PCI_DEVICE(0x8571, SWITCHTEC_GEN3),  //PFXI 24XG3
	SWITCHTEC_PCI_DEVICE(0x8572, SWITCHTEC_GEN3),  //PFXI 32XG3
	SWITCHTEC_PCI_DEVICE(0x8573, SWITCHTEC_GEN3),  //PFXI 48XG3
	SWITCHTEC_PCI_DEVICE(0x8574, SWITCHTEC_GEN3),  //PFXI 64XG3
	SWITCHTEC_PCI_DEVICE(0x8575, SWITCHTEC_GEN3),  //PFXI 80XG3
	SWITCHTEC_PCI_DEVICE(0x8576, SWITCHTEC_GEN3),  //PFXI 96XG3
	{0}
};
MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl);
1580
/* PCI driver glue: probe/remove against the device table above */
static struct pci_driver switchtec_pci_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= switchtec_pci_tbl,
	.probe		= switchtec_pci_probe,
	.remove		= switchtec_pci_remove,
};
1587
1588 static int __init switchtec_init(void)
1589 {
1590         int rc;
1591
1592         rc = alloc_chrdev_region(&switchtec_devt, 0, max_devices,
1593                                  "switchtec");
1594         if (rc)
1595                 return rc;
1596
1597         switchtec_class = class_create(THIS_MODULE, "switchtec");
1598         if (IS_ERR(switchtec_class)) {
1599                 rc = PTR_ERR(switchtec_class);
1600                 goto err_create_class;
1601         }
1602
1603         rc = pci_register_driver(&switchtec_pci_driver);
1604         if (rc)
1605                 goto err_pci_register;
1606
1607         pr_info(KBUILD_MODNAME ": loaded.\n");
1608
1609         return 0;
1610
1611 err_pci_register:
1612         class_destroy(switchtec_class);
1613
1614 err_create_class:
1615         unregister_chrdev_region(switchtec_devt, max_devices);
1616
1617         return rc;
1618 }
1619 module_init(switchtec_init);
1620
/*
 * Module exit: unregister the PCI driver first so no devices remain,
 * then tear down the class, char-device region and minor-number IDA.
 */
static void __exit switchtec_exit(void)
{
	pci_unregister_driver(&switchtec_pci_driver);
	class_destroy(switchtec_class);
	unregister_chrdev_region(switchtec_devt, max_devices);
	ida_destroy(&switchtec_minor_ida);

	pr_info(KBUILD_MODNAME ": unloaded.\n");
}