drivers/pci/switch/switchtec.c (from linux-2.6-microblaze.git, at commit "vfs: do bulk POLL* -> EPOLL* replacement")
// SPDX-License-Identifier: GPL-2.0
/*
 * Microsemi Switchtec(tm) PCIe Management Driver
 * Copyright (c) 2017, Microsemi Corporation
 */

#include <linux/switchtec.h>
#include <linux/switchtec_ioctl.h>

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>

MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Microsemi Corporation");

static int max_devices = 16;
module_param(max_devices, int, 0644);
MODULE_PARM_DESC(max_devices, "max number of switchtec device instances");

static dev_t switchtec_devt;
static DEFINE_IDA(switchtec_minor_ida);

struct class *switchtec_class;
EXPORT_SYMBOL_GPL(switchtec_class);

enum mrpc_state {
        MRPC_IDLE = 0,
        MRPC_QUEUED,
        MRPC_RUNNING,
        MRPC_DONE,
};

struct switchtec_user {
        struct switchtec_dev *stdev;

        enum mrpc_state state;

        struct completion comp;
        struct kref kref;
        struct list_head list;

        u32 cmd;
        u32 status;
        u32 return_code;
        size_t data_len;
        size_t read_len;
        unsigned char data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
        int event_cnt;
};
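
/*
 * A struct switchtec_user is allocated for each open file handle.  It pins
 * the switchtec_dev with a device reference, carries the state of at most
 * one outstanding MRPC command (command number, input/output payload,
 * firmware status and return code), and keeps the event count snapshot
 * that poll() compares against to report new switch events.
 */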

static struct switchtec_user *stuser_create(struct switchtec_dev *stdev)
{
        struct switchtec_user *stuser;

        stuser = kzalloc(sizeof(*stuser), GFP_KERNEL);
        if (!stuser)
                return ERR_PTR(-ENOMEM);

        get_device(&stdev->dev);
        stuser->stdev = stdev;
        kref_init(&stuser->kref);
        INIT_LIST_HEAD(&stuser->list);
        init_completion(&stuser->comp);
        stuser->event_cnt = atomic_read(&stdev->event_cnt);

        dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);

        return stuser;
}

static void stuser_free(struct kref *kref)
{
        struct switchtec_user *stuser;

        stuser = container_of(kref, struct switchtec_user, kref);

        dev_dbg(&stuser->stdev->dev, "%s: %p\n", __func__, stuser);

        put_device(&stuser->stdev->dev);
        kfree(stuser);
}

static void stuser_put(struct switchtec_user *stuser)
{
        kref_put(&stuser->kref, stuser_free);
}

static void stuser_set_state(struct switchtec_user *stuser,
                             enum mrpc_state state)
{
        /* requires the mrpc_mutex to already be held when called */

        const char * const state_names[] = {
                [MRPC_IDLE] = "IDLE",
                [MRPC_QUEUED] = "QUEUED",
                [MRPC_RUNNING] = "RUNNING",
                [MRPC_DONE] = "DONE",
        };

        stuser->state = state;

        dev_dbg(&stuser->stdev->dev, "stuser state %p -> %s",
                stuser, state_names[state]);
}

static void mrpc_complete_cmd(struct switchtec_dev *stdev);

static void mrpc_cmd_submit(struct switchtec_dev *stdev)
{
        /* requires the mrpc_mutex to already be held when called */

        struct switchtec_user *stuser;

        if (stdev->mrpc_busy)
                return;

        if (list_empty(&stdev->mrpc_queue))
                return;

        stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
                            list);

        stuser_set_state(stuser, MRPC_RUNNING);
        stdev->mrpc_busy = 1;
        memcpy_toio(&stdev->mmio_mrpc->input_data,
                    stuser->data, stuser->data_len);
        iowrite32(stuser->cmd, &stdev->mmio_mrpc->cmd);

        stuser->status = ioread32(&stdev->mmio_mrpc->status);
        if (stuser->status != SWITCHTEC_MRPC_STATUS_INPROGRESS)
                mrpc_complete_cmd(stdev);

        schedule_delayed_work(&stdev->mrpc_timeout,
                              msecs_to_jiffies(500));
}

static int mrpc_queue_cmd(struct switchtec_user *stuser)
{
        /* requires the mrpc_mutex to already be held when called */

        struct switchtec_dev *stdev = stuser->stdev;

        kref_get(&stuser->kref);
        stuser->read_len = sizeof(stuser->data);
        stuser_set_state(stuser, MRPC_QUEUED);
        init_completion(&stuser->comp);
        list_add_tail(&stuser->list, &stdev->mrpc_queue);

        mrpc_cmd_submit(stdev);

        return 0;
}

static void mrpc_complete_cmd(struct switchtec_dev *stdev)
{
        /* requires the mrpc_mutex to already be held when called */
        struct switchtec_user *stuser;

        if (list_empty(&stdev->mrpc_queue))
                return;

        stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
                            list);

        stuser->status = ioread32(&stdev->mmio_mrpc->status);
        if (stuser->status == SWITCHTEC_MRPC_STATUS_INPROGRESS)
                return;

        stuser_set_state(stuser, MRPC_DONE);
        stuser->return_code = 0;

        if (stuser->status != SWITCHTEC_MRPC_STATUS_DONE)
                goto out;

        stuser->return_code = ioread32(&stdev->mmio_mrpc->ret_value);
        if (stuser->return_code != 0)
                goto out;

        memcpy_fromio(stuser->data, &stdev->mmio_mrpc->output_data,
                      stuser->read_len);

out:
        complete_all(&stuser->comp);
        list_del_init(&stuser->list);
        stuser_put(stuser);
        stdev->mrpc_busy = 0;

        mrpc_cmd_submit(stdev);
}

static void mrpc_event_work(struct work_struct *work)
{
        struct switchtec_dev *stdev;

        stdev = container_of(work, struct switchtec_dev, mrpc_work);

        dev_dbg(&stdev->dev, "%s\n", __func__);

        mutex_lock(&stdev->mrpc_mutex);
        cancel_delayed_work(&stdev->mrpc_timeout);
        mrpc_complete_cmd(stdev);
        mutex_unlock(&stdev->mrpc_mutex);
}

static void mrpc_timeout_work(struct work_struct *work)
{
        struct switchtec_dev *stdev;
        u32 status;

        stdev = container_of(work, struct switchtec_dev, mrpc_timeout.work);

        dev_dbg(&stdev->dev, "%s\n", __func__);

        mutex_lock(&stdev->mrpc_mutex);

        status = ioread32(&stdev->mmio_mrpc->status);
        if (status == SWITCHTEC_MRPC_STATUS_INPROGRESS) {
                schedule_delayed_work(&stdev->mrpc_timeout,
                                      msecs_to_jiffies(500));
                goto out;
        }

        mrpc_complete_cmd(stdev);

out:
        mutex_unlock(&stdev->mrpc_mutex);
}
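
/*
 * MRPC command flow: switchtec_dev_write() queues a switchtec_user on
 * mrpc_queue, and mrpc_cmd_submit() copies its request into the MRPC
 * region, running at most one command at a time (mrpc_busy).  Completion
 * is normally signalled by the MRPC completion interrupt, which schedules
 * mrpc_event_work(); mrpc_timeout_work() re-polls the status register
 * every 500ms as a fallback.  mrpc_complete_cmd() copies back the output
 * data, wakes the waiter and submits the next queued command.  All of
 * this runs under mrpc_mutex.
 */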

static ssize_t device_version_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct switchtec_dev *stdev = to_stdev(dev);
        u32 ver;

        ver = ioread32(&stdev->mmio_sys_info->device_version);

        return sprintf(buf, "%x\n", ver);
}
static DEVICE_ATTR_RO(device_version);

static ssize_t fw_version_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct switchtec_dev *stdev = to_stdev(dev);
        u32 ver;

        ver = ioread32(&stdev->mmio_sys_info->firmware_version);

        return sprintf(buf, "%08x\n", ver);
}
static DEVICE_ATTR_RO(fw_version);

static ssize_t io_string_show(char *buf, void __iomem *attr, size_t len)
{
        int i;

        memcpy_fromio(buf, attr, len);
        buf[len] = '\n';
        buf[len + 1] = 0;

        for (i = len - 1; i > 0; i--) {
                if (buf[i] != ' ')
                        break;
                buf[i] = '\n';
                buf[i + 1] = 0;
        }

        return strlen(buf);
}
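
/*
 * io_string_show() copies a fixed-width, space-padded identification
 * string out of device memory, trims the trailing padding and terminates
 * the result with a newline for sysfs.
 */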

#define DEVICE_ATTR_SYS_INFO_STR(field) \
static ssize_t field ## _show(struct device *dev, \
        struct device_attribute *attr, char *buf) \
{ \
        struct switchtec_dev *stdev = to_stdev(dev); \
        return io_string_show(buf, &stdev->mmio_sys_info->field, \
                            sizeof(stdev->mmio_sys_info->field)); \
} \
\
static DEVICE_ATTR_RO(field)

DEVICE_ATTR_SYS_INFO_STR(vendor_id);
DEVICE_ATTR_SYS_INFO_STR(product_id);
DEVICE_ATTR_SYS_INFO_STR(product_revision);
DEVICE_ATTR_SYS_INFO_STR(component_vendor);

static ssize_t component_id_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct switchtec_dev *stdev = to_stdev(dev);
        int id = ioread16(&stdev->mmio_sys_info->component_id);

        return sprintf(buf, "PM%04X\n", id);
}
static DEVICE_ATTR_RO(component_id);

static ssize_t component_revision_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct switchtec_dev *stdev = to_stdev(dev);
        int rev = ioread8(&stdev->mmio_sys_info->component_revision);

        return sprintf(buf, "%d\n", rev);
}
static DEVICE_ATTR_RO(component_revision);

static ssize_t partition_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct switchtec_dev *stdev = to_stdev(dev);

        return sprintf(buf, "%d\n", stdev->partition);
}
static DEVICE_ATTR_RO(partition);

static ssize_t partition_count_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct switchtec_dev *stdev = to_stdev(dev);

        return sprintf(buf, "%d\n", stdev->partition_count);
}
static DEVICE_ATTR_RO(partition_count);

static struct attribute *switchtec_device_attrs[] = {
        &dev_attr_device_version.attr,
        &dev_attr_fw_version.attr,
        &dev_attr_vendor_id.attr,
        &dev_attr_product_id.attr,
        &dev_attr_product_revision.attr,
        &dev_attr_component_vendor.attr,
        &dev_attr_component_id.attr,
        &dev_attr_component_revision.attr,
        &dev_attr_partition.attr,
        &dev_attr_partition_count.attr,
        NULL,
};

ATTRIBUTE_GROUPS(switchtec_device);
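
/*
 * The attribute group defined above is attached to the class device in
 * stdev_create(), so these read-only files should show up under sysfs,
 * e.g. /sys/class/switchtec/switchtec0/fw_version (path shown for
 * illustration; the instance number depends on probe order).
 */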

static int switchtec_dev_open(struct inode *inode, struct file *filp)
{
        struct switchtec_dev *stdev;
        struct switchtec_user *stuser;

        stdev = container_of(inode->i_cdev, struct switchtec_dev, cdev);

        stuser = stuser_create(stdev);
        if (IS_ERR(stuser))
                return PTR_ERR(stuser);

        filp->private_data = stuser;
        nonseekable_open(inode, filp);

        dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);

        return 0;
}

static int switchtec_dev_release(struct inode *inode, struct file *filp)
{
        struct switchtec_user *stuser = filp->private_data;

        stuser_put(stuser);

        return 0;
}

static int lock_mutex_and_test_alive(struct switchtec_dev *stdev)
{
        if (mutex_lock_interruptible(&stdev->mrpc_mutex))
                return -EINTR;

        if (!stdev->alive) {
                mutex_unlock(&stdev->mrpc_mutex);
                return -ENODEV;
        }

        return 0;
}

static ssize_t switchtec_dev_write(struct file *filp, const char __user *data,
                                   size_t size, loff_t *off)
{
        struct switchtec_user *stuser = filp->private_data;
        struct switchtec_dev *stdev = stuser->stdev;
        int rc;

        if (size < sizeof(stuser->cmd) ||
            size > sizeof(stuser->cmd) + sizeof(stuser->data))
                return -EINVAL;

        stuser->data_len = size - sizeof(stuser->cmd);

        rc = lock_mutex_and_test_alive(stdev);
        if (rc)
                return rc;

        if (stuser->state != MRPC_IDLE) {
                rc = -EBADE;
                goto out;
        }

        rc = copy_from_user(&stuser->cmd, data, sizeof(stuser->cmd));
        if (rc) {
                rc = -EFAULT;
                goto out;
        }

        data += sizeof(stuser->cmd);
        rc = copy_from_user(&stuser->data, data, size - sizeof(stuser->cmd));
        if (rc) {
                rc = -EFAULT;
                goto out;
        }

        rc = mrpc_queue_cmd(stuser);

out:
        mutex_unlock(&stdev->mrpc_mutex);

        if (rc)
                return rc;

        return size;
}
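
/*
 * Write format implemented above: the first four bytes of the buffer are
 * the MRPC command number and the remainder (up to
 * SWITCHTEC_MRPC_PAYLOAD_SIZE bytes) is the input payload.  Only one
 * command may be in flight per file handle; writing again before the
 * previous result has been read back fails with -EBADE.
 */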

static ssize_t switchtec_dev_read(struct file *filp, char __user *data,
                                  size_t size, loff_t *off)
{
        struct switchtec_user *stuser = filp->private_data;
        struct switchtec_dev *stdev = stuser->stdev;
        int rc;

        if (size < sizeof(stuser->cmd) ||
            size > sizeof(stuser->cmd) + sizeof(stuser->data))
                return -EINVAL;

        rc = lock_mutex_and_test_alive(stdev);
        if (rc)
                return rc;

        if (stuser->state == MRPC_IDLE) {
                mutex_unlock(&stdev->mrpc_mutex);
                return -EBADE;
        }

        stuser->read_len = size - sizeof(stuser->return_code);

        mutex_unlock(&stdev->mrpc_mutex);

        if (filp->f_flags & O_NONBLOCK) {
                if (!try_wait_for_completion(&stuser->comp))
                        return -EAGAIN;
        } else {
                rc = wait_for_completion_interruptible(&stuser->comp);
                if (rc < 0)
                        return rc;
        }

        rc = lock_mutex_and_test_alive(stdev);
        if (rc)
                return rc;

        if (stuser->state != MRPC_DONE) {
                mutex_unlock(&stdev->mrpc_mutex);
                return -EBADE;
        }

        rc = copy_to_user(data, &stuser->return_code,
                          sizeof(stuser->return_code));
        if (rc) {
                rc = -EFAULT;
                goto out;
        }

        data += sizeof(stuser->return_code);
        rc = copy_to_user(data, &stuser->data,
                          size - sizeof(stuser->return_code));
        if (rc) {
                rc = -EFAULT;
                goto out;
        }

        stuser_set_state(stuser, MRPC_IDLE);

out:
        mutex_unlock(&stdev->mrpc_mutex);

        if (stuser->status == SWITCHTEC_MRPC_STATUS_DONE)
                return size;
        else if (stuser->status == SWITCHTEC_MRPC_STATUS_INTERRUPTED)
                return -ENXIO;
        else
                return -EBADMSG;
}
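
/*
 * Minimal userspace sketch of the write/read protocol implemented above.
 * The command number (0x1) and the 8-byte payload are placeholders, not a
 * real Switchtec MRPC command; the real command set comes from the device
 * firmware documentation.
 *
 *      #include <fcntl.h>
 *      #include <stdint.h>
 *      #include <string.h>
 *      #include <unistd.h>
 *
 *      int run_mrpc(void)
 *      {
 *              uint8_t buf[4 + 8];
 *              uint32_t cmd = 0x1;     // placeholder command number
 *              uint32_t ret;
 *              int fd = open("/dev/switchtec0", O_RDWR);
 *
 *              if (fd < 0)
 *                      return -1;
 *
 *              memcpy(buf, &cmd, 4);           // bytes 0-3: command
 *              memset(buf + 4, 0, 8);          // bytes 4+: input payload
 *              if (write(fd, buf, sizeof(buf)) != sizeof(buf))
 *                      goto err;
 *
 *              // The first four bytes read back are the firmware return
 *              // code; the rest is output data.  The read size chooses
 *              // how much output the driver copies out.
 *              if (read(fd, buf, sizeof(buf)) != sizeof(buf))
 *                      goto err;
 *
 *              memcpy(&ret, buf, 4);
 *              close(fd);
 *              return ret;
 *      err:
 *              close(fd);
 *              return -1;
 *      }
 */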

static __poll_t switchtec_dev_poll(struct file *filp, poll_table *wait)
{
        struct switchtec_user *stuser = filp->private_data;
        struct switchtec_dev *stdev = stuser->stdev;
        __poll_t ret = 0;

        poll_wait(filp, &stuser->comp.wait, wait);
        poll_wait(filp, &stdev->event_wq, wait);

        if (lock_mutex_and_test_alive(stdev))
                return EPOLLIN | EPOLLRDHUP | EPOLLOUT | EPOLLERR | EPOLLHUP;

        mutex_unlock(&stdev->mrpc_mutex);

        if (try_wait_for_completion(&stuser->comp))
                ret |= EPOLLIN | EPOLLRDNORM;

        if (stuser->event_cnt != atomic_read(&stdev->event_cnt))
                ret |= EPOLLPRI | EPOLLRDBAND;

        return ret;
}
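
/*
 * poll() semantics implemented above: EPOLLIN | EPOLLRDNORM means the
 * queued MRPC command has completed and read() will not block;
 * EPOLLPRI | EPOLLRDBAND means new switch events have arrived since this
 * file handle was opened or last issued SWITCHTEC_IOCTL_EVENT_SUMMARY.
 * If the device has gone away, the full error mask is returned so that
 * sleeping callers wake immediately.
 *
 * Hypothetical userspace sketch, waiting only for events:
 *
 *      struct pollfd pfd = { .fd = fd, .events = POLLPRI };
 *
 *      if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLPRI))
 *              ;  // fetch details via SWITCHTEC_IOCTL_EVENT_SUMMARY
 */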

static int ioctl_flash_info(struct switchtec_dev *stdev,
                            struct switchtec_ioctl_flash_info __user *uinfo)
{
        struct switchtec_ioctl_flash_info info = {0};
        struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;

        info.flash_length = ioread32(&fi->flash_length);
        info.num_partitions = SWITCHTEC_IOCTL_NUM_PARTITIONS;

        if (copy_to_user(uinfo, &info, sizeof(info)))
                return -EFAULT;

        return 0;
}

static void set_fw_info_part(struct switchtec_ioctl_flash_part_info *info,
                             struct partition_info __iomem *pi)
{
        info->address = ioread32(&pi->address);
        info->length = ioread32(&pi->length);
}

static int ioctl_flash_part_info(struct switchtec_dev *stdev,
        struct switchtec_ioctl_flash_part_info __user *uinfo)
{
        struct switchtec_ioctl_flash_part_info info = {0};
        struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;
        struct sys_info_regs __iomem *si = stdev->mmio_sys_info;
        u32 active_addr = -1;

        if (copy_from_user(&info, uinfo, sizeof(info)))
                return -EFAULT;

        switch (info.flash_partition) {
        case SWITCHTEC_IOCTL_PART_CFG0:
                active_addr = ioread32(&fi->active_cfg);
                set_fw_info_part(&info, &fi->cfg0);
                if (ioread16(&si->cfg_running) == SWITCHTEC_CFG0_RUNNING)
                        info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
                break;
        case SWITCHTEC_IOCTL_PART_CFG1:
                active_addr = ioread32(&fi->active_cfg);
                set_fw_info_part(&info, &fi->cfg1);
                if (ioread16(&si->cfg_running) == SWITCHTEC_CFG1_RUNNING)
                        info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
                break;
        case SWITCHTEC_IOCTL_PART_IMG0:
                active_addr = ioread32(&fi->active_img);
                set_fw_info_part(&info, &fi->img0);
                if (ioread16(&si->img_running) == SWITCHTEC_IMG0_RUNNING)
                        info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
                break;
        case SWITCHTEC_IOCTL_PART_IMG1:
                active_addr = ioread32(&fi->active_img);
                set_fw_info_part(&info, &fi->img1);
                if (ioread16(&si->img_running) == SWITCHTEC_IMG1_RUNNING)
                        info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
                break;
        case SWITCHTEC_IOCTL_PART_NVLOG:
                set_fw_info_part(&info, &fi->nvlog);
                break;
        case SWITCHTEC_IOCTL_PART_VENDOR0:
                set_fw_info_part(&info, &fi->vendor[0]);
                break;
        case SWITCHTEC_IOCTL_PART_VENDOR1:
                set_fw_info_part(&info, &fi->vendor[1]);
                break;
        case SWITCHTEC_IOCTL_PART_VENDOR2:
                set_fw_info_part(&info, &fi->vendor[2]);
                break;
        case SWITCHTEC_IOCTL_PART_VENDOR3:
                set_fw_info_part(&info, &fi->vendor[3]);
                break;
        case SWITCHTEC_IOCTL_PART_VENDOR4:
                set_fw_info_part(&info, &fi->vendor[4]);
                break;
        case SWITCHTEC_IOCTL_PART_VENDOR5:
                set_fw_info_part(&info, &fi->vendor[5]);
                break;
        case SWITCHTEC_IOCTL_PART_VENDOR6:
                set_fw_info_part(&info, &fi->vendor[6]);
                break;
        case SWITCHTEC_IOCTL_PART_VENDOR7:
                set_fw_info_part(&info, &fi->vendor[7]);
                break;
        default:
                return -EINVAL;
        }

        if (info.address == active_addr)
                info.active |= SWITCHTEC_IOCTL_PART_ACTIVE;

        if (copy_to_user(uinfo, &info, sizeof(info)))
                return -EFAULT;

        return 0;
}

static int ioctl_event_summary(struct switchtec_dev *stdev,
        struct switchtec_user *stuser,
        struct switchtec_ioctl_event_summary __user *usum)
{
        struct switchtec_ioctl_event_summary s = {0};
        int i;
        u32 reg;

        s.global = ioread32(&stdev->mmio_sw_event->global_summary);
        s.part_bitmap = ioread32(&stdev->mmio_sw_event->part_event_bitmap);
        s.local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary);

        for (i = 0; i < stdev->partition_count; i++) {
                reg = ioread32(&stdev->mmio_part_cfg_all[i].part_event_summary);
                s.part[i] = reg;
        }

        for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
                reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
                if (reg != MICROSEMI_VENDOR_ID)
                        break;

                reg = ioread32(&stdev->mmio_pff_csr[i].pff_event_summary);
                s.pff[i] = reg;
        }

        if (copy_to_user(usum, &s, sizeof(s)))
                return -EFAULT;

        stuser->event_cnt = atomic_read(&stdev->event_cnt);

        return 0;
}

static u32 __iomem *global_ev_reg(struct switchtec_dev *stdev,
                                  size_t offset, int index)
{
        return (void __iomem *)stdev->mmio_sw_event + offset;
}

static u32 __iomem *part_ev_reg(struct switchtec_dev *stdev,
                                size_t offset, int index)
{
        return (void __iomem *)&stdev->mmio_part_cfg_all[index] + offset;
}

static u32 __iomem *pff_ev_reg(struct switchtec_dev *stdev,
                               size_t offset, int index)
{
        return (void __iomem *)&stdev->mmio_pff_csr[index] + offset;
}
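
/*
 * Event headers live in one of three register spaces: the global software
 * event registers, the per-partition configuration registers, or the
 * per-PFF CSRs.  The event_regs[] table below maps each
 * SWITCHTEC_IOCTL_EVENT_* id to the header offset within its space plus
 * one of the three helpers above that turns (offset, index) into an
 * address.
 */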

#define EV_GLB(i, r)[i] = {offsetof(struct sw_event_regs, r), global_ev_reg}
#define EV_PAR(i, r)[i] = {offsetof(struct part_cfg_regs, r), part_ev_reg}
#define EV_PFF(i, r)[i] = {offsetof(struct pff_csr_regs, r), pff_ev_reg}

static const struct event_reg {
        size_t offset;
        u32 __iomem *(*map_reg)(struct switchtec_dev *stdev,
                                size_t offset, int index);
} event_regs[] = {
        EV_GLB(SWITCHTEC_IOCTL_EVENT_STACK_ERROR, stack_error_event_hdr),
        EV_GLB(SWITCHTEC_IOCTL_EVENT_PPU_ERROR, ppu_error_event_hdr),
        EV_GLB(SWITCHTEC_IOCTL_EVENT_ISP_ERROR, isp_error_event_hdr),
        EV_GLB(SWITCHTEC_IOCTL_EVENT_SYS_RESET, sys_reset_event_hdr),
        EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_EXC, fw_exception_hdr),
        EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NMI, fw_nmi_hdr),
        EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NON_FATAL, fw_non_fatal_hdr),
        EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_FATAL, fw_fatal_hdr),
        EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP, twi_mrpc_comp_hdr),
        EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP_ASYNC,
               twi_mrpc_comp_async_hdr),
        EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP, cli_mrpc_comp_hdr),
        EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP_ASYNC,
               cli_mrpc_comp_async_hdr),
        EV_GLB(SWITCHTEC_IOCTL_EVENT_GPIO_INT, gpio_interrupt_hdr),
        EV_GLB(SWITCHTEC_IOCTL_EVENT_GFMS, gfms_event_hdr),
        EV_PAR(SWITCHTEC_IOCTL_EVENT_PART_RESET, part_reset_hdr),
        EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP, mrpc_comp_hdr),
        EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP_ASYNC, mrpc_comp_async_hdr),
        EV_PAR(SWITCHTEC_IOCTL_EVENT_DYN_PART_BIND_COMP, dyn_binding_hdr),
        EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_P2P, aer_in_p2p_hdr),
        EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_VEP, aer_in_vep_hdr),
        EV_PFF(SWITCHTEC_IOCTL_EVENT_DPC, dpc_hdr),
        EV_PFF(SWITCHTEC_IOCTL_EVENT_CTS, cts_hdr),
        EV_PFF(SWITCHTEC_IOCTL_EVENT_HOTPLUG, hotplug_hdr),
        EV_PFF(SWITCHTEC_IOCTL_EVENT_IER, ier_hdr),
        EV_PFF(SWITCHTEC_IOCTL_EVENT_THRESH, threshold_hdr),
        EV_PFF(SWITCHTEC_IOCTL_EVENT_POWER_MGMT, power_mgmt_hdr),
        EV_PFF(SWITCHTEC_IOCTL_EVENT_TLP_THROTTLING, tlp_throttling_hdr),
        EV_PFF(SWITCHTEC_IOCTL_EVENT_FORCE_SPEED, force_speed_hdr),
        EV_PFF(SWITCHTEC_IOCTL_EVENT_CREDIT_TIMEOUT, credit_timeout_hdr),
        EV_PFF(SWITCHTEC_IOCTL_EVENT_LINK_STATE, link_state_hdr),
};

static u32 __iomem *event_hdr_addr(struct switchtec_dev *stdev,
                                   int event_id, int index)
{
        size_t off;

        if (event_id < 0 || event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
                return ERR_PTR(-EINVAL);

        off = event_regs[event_id].offset;

        if (event_regs[event_id].map_reg == part_ev_reg) {
                if (index == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
                        index = stdev->partition;
                else if (index < 0 || index >= stdev->partition_count)
                        return ERR_PTR(-EINVAL);
        } else if (event_regs[event_id].map_reg == pff_ev_reg) {
                if (index < 0 || index >= stdev->pff_csr_count)
                        return ERR_PTR(-EINVAL);
        }

        return event_regs[event_id].map_reg(stdev, off, index);
}

static int event_ctl(struct switchtec_dev *stdev,
                     struct switchtec_ioctl_event_ctl *ctl)
{
        int i;
        u32 __iomem *reg;
        u32 hdr;

        reg = event_hdr_addr(stdev, ctl->event_id, ctl->index);
        if (IS_ERR(reg))
                return PTR_ERR(reg);

        hdr = ioread32(reg);
        for (i = 0; i < ARRAY_SIZE(ctl->data); i++)
                ctl->data[i] = ioread32(&reg[i + 1]);

        ctl->occurred = hdr & SWITCHTEC_EVENT_OCCURRED;
        ctl->count = (hdr >> 5) & 0xFF;

        if (!(ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_CLEAR))
                hdr &= ~SWITCHTEC_EVENT_CLEAR;
        if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL)
                hdr |= SWITCHTEC_EVENT_EN_IRQ;
        if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_POLL)
                hdr &= ~SWITCHTEC_EVENT_EN_IRQ;
        if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG)
                hdr |= SWITCHTEC_EVENT_EN_LOG;
        if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_LOG)
                hdr &= ~SWITCHTEC_EVENT_EN_LOG;
        if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI)
                hdr |= SWITCHTEC_EVENT_EN_CLI;
        if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_CLI)
                hdr &= ~SWITCHTEC_EVENT_EN_CLI;
        if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL)
                hdr |= SWITCHTEC_EVENT_FATAL;
        if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_FATAL)
                hdr &= ~SWITCHTEC_EVENT_FATAL;

        if (ctl->flags)
                iowrite32(hdr, reg);

        ctl->flags = 0;
        if (hdr & SWITCHTEC_EVENT_EN_IRQ)
                ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL;
        if (hdr & SWITCHTEC_EVENT_EN_LOG)
                ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG;
        if (hdr & SWITCHTEC_EVENT_EN_CLI)
                ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI;
        if (hdr & SWITCHTEC_EVENT_FATAL)
                ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL;

        return 0;
}
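
/*
 * As decoded by event_ctl() above, an event header register carries an
 * "occurred" flag, an 8-bit occurrence count in bits 5-12, the enable
 * bits (IRQ, log, CLI, fatal) and a clear bit.  The function reports the
 * resulting state back through ctl->flags and only writes the header
 * back when the caller passed at least one flag.
 */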

static int ioctl_event_ctl(struct switchtec_dev *stdev,
        struct switchtec_ioctl_event_ctl __user *uctl)
{
        int ret;
        int nr_idxs;
        struct switchtec_ioctl_event_ctl ctl;

        if (copy_from_user(&ctl, uctl, sizeof(ctl)))
                return -EFAULT;

        if (ctl.event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
                return -EINVAL;

        if (ctl.flags & SWITCHTEC_IOCTL_EVENT_FLAG_UNUSED)
                return -EINVAL;

        if (ctl.index == SWITCHTEC_IOCTL_EVENT_IDX_ALL) {
                if (event_regs[ctl.event_id].map_reg == global_ev_reg)
                        nr_idxs = 1;
                else if (event_regs[ctl.event_id].map_reg == part_ev_reg)
                        nr_idxs = stdev->partition_count;
                else if (event_regs[ctl.event_id].map_reg == pff_ev_reg)
                        nr_idxs = stdev->pff_csr_count;
                else
                        return -EINVAL;

                for (ctl.index = 0; ctl.index < nr_idxs; ctl.index++) {
                        ret = event_ctl(stdev, &ctl);
                        if (ret < 0)
                                return ret;
                }
        } else {
                ret = event_ctl(stdev, &ctl);
                if (ret < 0)
                        return ret;
        }

        if (copy_to_user(uctl, &ctl, sizeof(ctl)))
                return -EFAULT;

        return 0;
}
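
/*
 * Hypothetical userspace sketch: query the current state of one event
 * without modifying it (passing flags == 0 makes event_ctl() skip the
 * register write).  Field names match the handler above; exact types are
 * defined in the switchtec_ioctl.h UAPI header.
 *
 *      struct switchtec_ioctl_event_ctl ctl = {
 *              .event_id = SWITCHTEC_IOCTL_EVENT_LINK_STATE,
 *              .index = 0,
 *              .flags = 0,
 *      };
 *
 *      if (ioctl(fd, SWITCHTEC_IOCTL_EVENT_CTL, &ctl) == 0)
 *              printf("occurred=%u count=%u flags=%#x\n",
 *                     (unsigned)ctl.occurred, (unsigned)ctl.count,
 *                     (unsigned)ctl.flags);
 */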

static int ioctl_pff_to_port(struct switchtec_dev *stdev,
                             struct switchtec_ioctl_pff_port *up)
{
        int i, part;
        u32 reg;
        struct part_cfg_regs *pcfg;
        struct switchtec_ioctl_pff_port p;

        if (copy_from_user(&p, up, sizeof(p)))
                return -EFAULT;

        p.port = -1;
        for (part = 0; part < stdev->partition_count; part++) {
                pcfg = &stdev->mmio_part_cfg_all[part];
                p.partition = part;

                reg = ioread32(&pcfg->usp_pff_inst_id);
                if (reg == p.pff) {
                        p.port = 0;
                        break;
                }

                reg = ioread32(&pcfg->vep_pff_inst_id);
                if (reg == p.pff) {
                        p.port = SWITCHTEC_IOCTL_PFF_VEP;
                        break;
                }

                for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
                        reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
                        if (reg != p.pff)
                                continue;

                        p.port = i + 1;
                        break;
                }

                if (p.port != -1)
                        break;
        }

        if (copy_to_user(up, &p, sizeof(p)))
                return -EFAULT;

        return 0;
}

static int ioctl_port_to_pff(struct switchtec_dev *stdev,
                             struct switchtec_ioctl_pff_port *up)
{
        struct switchtec_ioctl_pff_port p;
        struct part_cfg_regs *pcfg;

        if (copy_from_user(&p, up, sizeof(p)))
                return -EFAULT;

        if (p.partition == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
                pcfg = stdev->mmio_part_cfg;
        else if (p.partition < stdev->partition_count)
                pcfg = &stdev->mmio_part_cfg_all[p.partition];
        else
                return -EINVAL;

        switch (p.port) {
        case 0:
                p.pff = ioread32(&pcfg->usp_pff_inst_id);
                break;
        case SWITCHTEC_IOCTL_PFF_VEP:
                p.pff = ioread32(&pcfg->vep_pff_inst_id);
                break;
        default:
                if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id))
                        return -EINVAL;
                p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]);
                break;
        }

        if (copy_to_user(up, &p, sizeof(p)))
                return -EFAULT;

        return 0;
}

static long switchtec_dev_ioctl(struct file *filp, unsigned int cmd,
                                unsigned long arg)
{
        struct switchtec_user *stuser = filp->private_data;
        struct switchtec_dev *stdev = stuser->stdev;
        int rc;
        void __user *argp = (void __user *)arg;

        rc = lock_mutex_and_test_alive(stdev);
        if (rc)
                return rc;

        switch (cmd) {
        case SWITCHTEC_IOCTL_FLASH_INFO:
                rc = ioctl_flash_info(stdev, argp);
                break;
        case SWITCHTEC_IOCTL_FLASH_PART_INFO:
                rc = ioctl_flash_part_info(stdev, argp);
                break;
        case SWITCHTEC_IOCTL_EVENT_SUMMARY:
                rc = ioctl_event_summary(stdev, stuser, argp);
                break;
        case SWITCHTEC_IOCTL_EVENT_CTL:
                rc = ioctl_event_ctl(stdev, argp);
                break;
        case SWITCHTEC_IOCTL_PFF_TO_PORT:
                rc = ioctl_pff_to_port(stdev, argp);
                break;
        case SWITCHTEC_IOCTL_PORT_TO_PFF:
                rc = ioctl_port_to_pff(stdev, argp);
                break;
        default:
                rc = -ENOTTY;
                break;
        }

        mutex_unlock(&stdev->mrpc_mutex);
        return rc;
}

static const struct file_operations switchtec_fops = {
        .owner = THIS_MODULE,
        .open = switchtec_dev_open,
        .release = switchtec_dev_release,
        .write = switchtec_dev_write,
        .read = switchtec_dev_read,
        .poll = switchtec_dev_poll,
        .unlocked_ioctl = switchtec_dev_ioctl,
        .compat_ioctl = switchtec_dev_ioctl,
};

static void link_event_work(struct work_struct *work)
{
        struct switchtec_dev *stdev;

        stdev = container_of(work, struct switchtec_dev, link_event_work);

        if (stdev->link_notifier)
                stdev->link_notifier(stdev);
}

static void check_link_state_events(struct switchtec_dev *stdev)
{
        int idx;
        u32 reg;
        int count;
        int occurred = 0;

        for (idx = 0; idx < stdev->pff_csr_count; idx++) {
                reg = ioread32(&stdev->mmio_pff_csr[idx].link_state_hdr);
                dev_dbg(&stdev->dev, "link_state: %d->%08x\n", idx, reg);
                count = (reg >> 5) & 0xFF;

                if (count != stdev->link_event_count[idx]) {
                        occurred = 1;
                        stdev->link_event_count[idx] = count;
                }
        }

        if (occurred)
                schedule_work(&stdev->link_event_work);
}

static void enable_link_state_events(struct switchtec_dev *stdev)
{
        int idx;

        for (idx = 0; idx < stdev->pff_csr_count; idx++) {
                iowrite32(SWITCHTEC_EVENT_CLEAR |
                          SWITCHTEC_EVENT_EN_IRQ,
                          &stdev->mmio_pff_csr[idx].link_state_hdr);
        }
}

static void stdev_release(struct device *dev)
{
        struct switchtec_dev *stdev = to_stdev(dev);

        kfree(stdev);
}

static void stdev_kill(struct switchtec_dev *stdev)
{
        struct switchtec_user *stuser, *tmpuser;

        pci_clear_master(stdev->pdev);

        cancel_delayed_work_sync(&stdev->mrpc_timeout);

        /* Mark the hardware as unavailable and complete all completions */
        mutex_lock(&stdev->mrpc_mutex);
        stdev->alive = false;

        /* Wake up and kill any users waiting on an MRPC request */
        list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) {
                complete_all(&stuser->comp);
                list_del_init(&stuser->list);
                stuser_put(stuser);
        }

        mutex_unlock(&stdev->mrpc_mutex);

        /* Wake up any users waiting on event_wq */
        wake_up_interruptible(&stdev->event_wq);
}
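
/*
 * stdev_kill() runs from the PCI remove path (and the probe error path):
 * it stops bus mastering, cancels the MRPC timeout worker, marks the
 * device dead under mrpc_mutex, completes every queued MRPC waiter and
 * wakes anyone sleeping in poll(), so open file handles fail gracefully
 * with -ENODEV instead of touching vanished hardware.
 */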

static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
{
        struct switchtec_dev *stdev;
        int minor;
        struct device *dev;
        struct cdev *cdev;
        int rc;

        stdev = kzalloc_node(sizeof(*stdev), GFP_KERNEL,
                             dev_to_node(&pdev->dev));
        if (!stdev)
                return ERR_PTR(-ENOMEM);

        stdev->alive = true;
        stdev->pdev = pdev;
        INIT_LIST_HEAD(&stdev->mrpc_queue);
        mutex_init(&stdev->mrpc_mutex);
        stdev->mrpc_busy = 0;
        INIT_WORK(&stdev->mrpc_work, mrpc_event_work);
        INIT_DELAYED_WORK(&stdev->mrpc_timeout, mrpc_timeout_work);
        INIT_WORK(&stdev->link_event_work, link_event_work);
        init_waitqueue_head(&stdev->event_wq);
        atomic_set(&stdev->event_cnt, 0);

        dev = &stdev->dev;
        device_initialize(dev);
        dev->class = switchtec_class;
        dev->parent = &pdev->dev;
        dev->groups = switchtec_device_groups;
        dev->release = stdev_release;

        minor = ida_simple_get(&switchtec_minor_ida, 0, 0,
                               GFP_KERNEL);
        if (minor < 0) {
                rc = minor;
                goto err_put;
        }

        dev->devt = MKDEV(MAJOR(switchtec_devt), minor);
        dev_set_name(dev, "switchtec%d", minor);

        cdev = &stdev->cdev;
        cdev_init(cdev, &switchtec_fops);
        cdev->owner = THIS_MODULE;

        return stdev;

err_put:
        put_device(&stdev->dev);
        return ERR_PTR(rc);
}

static int mask_event(struct switchtec_dev *stdev, int eid, int idx)
{
        size_t off = event_regs[eid].offset;
        u32 __iomem *hdr_reg;
        u32 hdr;

        hdr_reg = event_regs[eid].map_reg(stdev, off, idx);
        hdr = ioread32(hdr_reg);

        if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ))
                return 0;

        if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE)
                return 0;

        dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr);
        hdr &= ~(SWITCHTEC_EVENT_EN_IRQ | SWITCHTEC_EVENT_OCCURRED);
        iowrite32(hdr, hdr_reg);

        return 1;
}

static int mask_all_events(struct switchtec_dev *stdev, int eid)
{
        int idx;
        int count = 0;

        if (event_regs[eid].map_reg == part_ev_reg) {
                for (idx = 0; idx < stdev->partition_count; idx++)
                        count += mask_event(stdev, eid, idx);
        } else if (event_regs[eid].map_reg == pff_ev_reg) {
                for (idx = 0; idx < stdev->pff_csr_count; idx++) {
                        if (!stdev->pff_local[idx])
                                continue;

                        count += mask_event(stdev, eid, idx);
                }
        } else {
                count += mask_event(stdev, eid, 0);
        }

        return count;
}

static irqreturn_t switchtec_event_isr(int irq, void *dev)
{
        struct switchtec_dev *stdev = dev;
        u32 reg;
        irqreturn_t ret = IRQ_NONE;
        int eid, event_count = 0;

        reg = ioread32(&stdev->mmio_part_cfg->mrpc_comp_hdr);
        if (reg & SWITCHTEC_EVENT_OCCURRED) {
                dev_dbg(&stdev->dev, "%s: mrpc comp\n", __func__);
                ret = IRQ_HANDLED;
                schedule_work(&stdev->mrpc_work);
                iowrite32(reg, &stdev->mmio_part_cfg->mrpc_comp_hdr);
        }

        check_link_state_events(stdev);

        for (eid = 0; eid < SWITCHTEC_IOCTL_MAX_EVENTS; eid++)
                event_count += mask_all_events(stdev, eid);

        if (event_count) {
                atomic_inc(&stdev->event_cnt);
                wake_up_interruptible(&stdev->event_wq);
                dev_dbg(&stdev->dev, "%s: %d events\n", __func__,
                        event_count);
                return IRQ_HANDLED;
        }

        return ret;
}
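
/*
 * The event interrupt handler acknowledges MRPC completions, samples the
 * per-PFF link state counters, and then masks (clears OCCURRED and
 * EN_IRQ on) every other event that is both flagged and IRQ-enabled so a
 * stuck event cannot storm the interrupt line.  Link state events are
 * skipped by mask_event() and handled via check_link_state_events()
 * instead.  Userspace is woken through event_wq and is expected to
 * re-arm the events it cares about with SWITCHTEC_IOCTL_EVENT_CTL.
 */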

static int switchtec_init_isr(struct switchtec_dev *stdev)
{
        int nvecs;
        int event_irq;

        nvecs = pci_alloc_irq_vectors(stdev->pdev, 1, 4,
                                      PCI_IRQ_MSIX | PCI_IRQ_MSI);
        if (nvecs < 0)
                return nvecs;

        event_irq = ioread32(&stdev->mmio_part_cfg->vep_vector_number);
        if (event_irq < 0 || event_irq >= nvecs)
                return -EFAULT;

        event_irq = pci_irq_vector(stdev->pdev, event_irq);
        if (event_irq < 0)
                return event_irq;

        return devm_request_irq(&stdev->pdev->dev, event_irq,
                                switchtec_event_isr, 0,
                                KBUILD_MODNAME, stdev);
}

static void init_pff(struct switchtec_dev *stdev)
{
        int i;
        u32 reg;
        struct part_cfg_regs *pcfg = stdev->mmio_part_cfg;

        for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
                reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
                if (reg != MICROSEMI_VENDOR_ID)
                        break;
        }

        stdev->pff_csr_count = i;

        reg = ioread32(&pcfg->usp_pff_inst_id);
        if (reg < SWITCHTEC_MAX_PFF_CSR)
                stdev->pff_local[reg] = 1;

        reg = ioread32(&pcfg->vep_pff_inst_id);
        if (reg < SWITCHTEC_MAX_PFF_CSR)
                stdev->pff_local[reg] = 1;

        for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
                reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
                if (reg < SWITCHTEC_MAX_PFF_CSR)
                        stdev->pff_local[reg] = 1;
        }
}

static int switchtec_init_pci(struct switchtec_dev *stdev,
                              struct pci_dev *pdev)
{
        int rc;

        rc = pcim_enable_device(pdev);
        if (rc)
                return rc;

        rc = pcim_iomap_regions(pdev, 0x1, KBUILD_MODNAME);
        if (rc)
                return rc;

        pci_set_master(pdev);

        stdev->mmio = pcim_iomap_table(pdev)[0];
        stdev->mmio_mrpc = stdev->mmio + SWITCHTEC_GAS_MRPC_OFFSET;
        stdev->mmio_sw_event = stdev->mmio + SWITCHTEC_GAS_SW_EVENT_OFFSET;
        stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET;
        stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET;
        stdev->mmio_ntb = stdev->mmio + SWITCHTEC_GAS_NTB_OFFSET;
        stdev->partition = ioread8(&stdev->mmio_sys_info->partition_id);
        stdev->partition_count = ioread8(&stdev->mmio_ntb->partition_count);
        stdev->mmio_part_cfg_all = stdev->mmio + SWITCHTEC_GAS_PART_CFG_OFFSET;
        stdev->mmio_part_cfg = &stdev->mmio_part_cfg_all[stdev->partition];
        stdev->mmio_pff_csr = stdev->mmio + SWITCHTEC_GAS_PFF_CSR_OFFSET;

        if (stdev->partition_count < 1)
                stdev->partition_count = 1;

        init_pff(stdev);

        pci_set_drvdata(pdev, stdev);

        return 0;
}
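
/*
 * BAR 0 is mapped once and the SWITCHTEC_GAS_* offsets locate the MRPC,
 * software event, system info, flash info, NTB, partition configuration
 * and PFF CSR regions within it; all of the mmio_* pointers used
 * throughout this driver are derived from that single mapping.
 */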

static int switchtec_pci_probe(struct pci_dev *pdev,
                               const struct pci_device_id *id)
{
        struct switchtec_dev *stdev;
        int rc;

        if (pdev->class == MICROSEMI_NTB_CLASSCODE)
                request_module_nowait("ntb_hw_switchtec");

        stdev = stdev_create(pdev);
        if (IS_ERR(stdev))
                return PTR_ERR(stdev);

        rc = switchtec_init_pci(stdev, pdev);
        if (rc)
                goto err_put;

        rc = switchtec_init_isr(stdev);
        if (rc) {
                dev_err(&stdev->dev, "failed to init isr.\n");
                goto err_put;
        }

        iowrite32(SWITCHTEC_EVENT_CLEAR |
                  SWITCHTEC_EVENT_EN_IRQ,
                  &stdev->mmio_part_cfg->mrpc_comp_hdr);
        enable_link_state_events(stdev);

        rc = cdev_device_add(&stdev->cdev, &stdev->dev);
        if (rc)
                goto err_devadd;

        dev_info(&stdev->dev, "Management device registered.\n");

        return 0;

err_devadd:
        stdev_kill(stdev);
err_put:
        ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
        put_device(&stdev->dev);
        return rc;
}

static void switchtec_pci_remove(struct pci_dev *pdev)
{
        struct switchtec_dev *stdev = pci_get_drvdata(pdev);

        pci_set_drvdata(pdev, NULL);

        cdev_device_del(&stdev->cdev, &stdev->dev);
        ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
        dev_info(&stdev->dev, "unregistered.\n");

        stdev_kill(stdev);
        put_device(&stdev->dev);
}

#define SWITCHTEC_PCI_DEVICE(device_id) \
        { \
                .vendor     = MICROSEMI_VENDOR_ID, \
                .device     = device_id, \
                .subvendor  = PCI_ANY_ID, \
                .subdevice  = PCI_ANY_ID, \
                .class      = MICROSEMI_MGMT_CLASSCODE, \
                .class_mask = 0xFFFFFFFF, \
        }, \
        { \
                .vendor     = MICROSEMI_VENDOR_ID, \
                .device     = device_id, \
                .subvendor  = PCI_ANY_ID, \
                .subdevice  = PCI_ANY_ID, \
                .class      = MICROSEMI_NTB_CLASSCODE, \
                .class_mask = 0xFFFFFFFF, \
        }

static const struct pci_device_id switchtec_pci_tbl[] = {
        SWITCHTEC_PCI_DEVICE(0x8531),  //PFX 24xG3
        SWITCHTEC_PCI_DEVICE(0x8532),  //PFX 32xG3
        SWITCHTEC_PCI_DEVICE(0x8533),  //PFX 48xG3
        SWITCHTEC_PCI_DEVICE(0x8534),  //PFX 64xG3
        SWITCHTEC_PCI_DEVICE(0x8535),  //PFX 80xG3
        SWITCHTEC_PCI_DEVICE(0x8536),  //PFX 96xG3
        SWITCHTEC_PCI_DEVICE(0x8541),  //PSX 24xG3
        SWITCHTEC_PCI_DEVICE(0x8542),  //PSX 32xG3
        SWITCHTEC_PCI_DEVICE(0x8543),  //PSX 48xG3
        SWITCHTEC_PCI_DEVICE(0x8544),  //PSX 64xG3
        SWITCHTEC_PCI_DEVICE(0x8545),  //PSX 80xG3
        SWITCHTEC_PCI_DEVICE(0x8546),  //PSX 96xG3
        SWITCHTEC_PCI_DEVICE(0x8551),  //PAX 24XG3
        SWITCHTEC_PCI_DEVICE(0x8552),  //PAX 32XG3
        SWITCHTEC_PCI_DEVICE(0x8553),  //PAX 48XG3
        SWITCHTEC_PCI_DEVICE(0x8554),  //PAX 64XG3
        SWITCHTEC_PCI_DEVICE(0x8555),  //PAX 80XG3
        SWITCHTEC_PCI_DEVICE(0x8556),  //PAX 96XG3
        SWITCHTEC_PCI_DEVICE(0x8561),  //PFXL 24XG3
        SWITCHTEC_PCI_DEVICE(0x8562),  //PFXL 32XG3
        SWITCHTEC_PCI_DEVICE(0x8563),  //PFXL 48XG3
        SWITCHTEC_PCI_DEVICE(0x8564),  //PFXL 64XG3
        SWITCHTEC_PCI_DEVICE(0x8565),  //PFXL 80XG3
        SWITCHTEC_PCI_DEVICE(0x8566),  //PFXL 96XG3
        SWITCHTEC_PCI_DEVICE(0x8571),  //PFXI 24XG3
        SWITCHTEC_PCI_DEVICE(0x8572),  //PFXI 32XG3
        SWITCHTEC_PCI_DEVICE(0x8573),  //PFXI 48XG3
        SWITCHTEC_PCI_DEVICE(0x8574),  //PFXI 64XG3
        SWITCHTEC_PCI_DEVICE(0x8575),  //PFXI 80XG3
        SWITCHTEC_PCI_DEVICE(0x8576),  //PFXI 96XG3
        {0}
};
MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl);

static struct pci_driver switchtec_pci_driver = {
        .name           = KBUILD_MODNAME,
        .id_table       = switchtec_pci_tbl,
        .probe          = switchtec_pci_probe,
        .remove         = switchtec_pci_remove,
};

static int __init switchtec_init(void)
{
        int rc;

        rc = alloc_chrdev_region(&switchtec_devt, 0, max_devices,
                                 "switchtec");
        if (rc)
                return rc;

        switchtec_class = class_create(THIS_MODULE, "switchtec");
        if (IS_ERR(switchtec_class)) {
                rc = PTR_ERR(switchtec_class);
                goto err_create_class;
        }

        rc = pci_register_driver(&switchtec_pci_driver);
        if (rc)
                goto err_pci_register;

        pr_info(KBUILD_MODNAME ": loaded.\n");

        return 0;

err_pci_register:
        class_destroy(switchtec_class);

err_create_class:
        unregister_chrdev_region(switchtec_devt, max_devices);

        return rc;
}
module_init(switchtec_init);

static void __exit switchtec_exit(void)
{
        pci_unregister_driver(&switchtec_pci_driver);
        class_destroy(switchtec_class);
        unregister_chrdev_region(switchtec_devt, max_devices);
        ida_destroy(&switchtec_minor_ida);

        pr_info(KBUILD_MODNAME ": unloaded.\n");
}
module_exit(switchtec_exit);