vfio/mdev: Correct the function signatures for the mdev_type_attributes
samples/vfio-mdev/mtty.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Mediated virtual PCI serial host device driver
4  *
5  * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
6  *     Author: Neo Jia <cjia@nvidia.com>
7  *             Kirti Wankhede <kwankhede@nvidia.com>
8  *
9  * Sample driver that creates an mdev device that simulates a serial port
10  * over a PCI card.
11  */
12
13 #include <linux/init.h>
14 #include <linux/module.h>
15 #include <linux/device.h>
16 #include <linux/kernel.h>
17 #include <linux/fs.h>
18 #include <linux/poll.h>
19 #include <linux/slab.h>
20 #include <linux/cdev.h>
21 #include <linux/sched.h>
22 #include <linux/wait.h>
23 #include <linux/uuid.h>
24 #include <linux/vfio.h>
25 #include <linux/iommu.h>
26 #include <linux/sysfs.h>
27 #include <linux/ctype.h>
28 #include <linux/file.h>
29 #include <linux/mdev.h>
30 #include <linux/pci.h>
31 #include <linux/serial.h>
32 #include <uapi/linux/serial_reg.h>
33 #include <linux/eventfd.h>
34 /*
35  * #defines
36  */
37
38 #define VERSION_STRING  "0.1"
39 #define DRIVER_AUTHOR   "NVIDIA Corporation"
40
41 #define MTTY_CLASS_NAME "mtty"
42
43 #define MTTY_NAME       "mtty"
44
45 #define MTTY_STRING_LEN         16
46
47 #define MTTY_CONFIG_SPACE_SIZE  0xff
48 #define MTTY_IO_BAR_SIZE        0x8
49 #define MTTY_MMIO_BAR_SIZE      0x100000
50
51 #define STORE_LE16(addr, val)   (*(u16 *)addr = val)
52 #define STORE_LE32(addr, val)   (*(u32 *)addr = val)
53
54 #define MAX_FIFO_SIZE   16
55
56 #define CIRCULAR_BUF_INC_IDX(idx)    (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
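/*
 * The FIFO index wrap relies on MAX_FIFO_SIZE being a power of two, so the
 * AND with (MAX_FIFO_SIZE - 1) is a cheap modulo.  For example, with
 * MAX_FIFO_SIZE == 16 an index of 15 advances to (15 + 1) & 15 == 0.
 */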
57
58 #define MTTY_VFIO_PCI_OFFSET_SHIFT   40
59
60 #define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off)   (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
61 #define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
62                                 ((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
63 #define MTTY_VFIO_PCI_OFFSET_MASK    \
64                                 (((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
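/*
 * Worked example of the offset encoding above (a sketch, assuming the usual
 * vfio-pci region numbering where PCI config space is region index 7): an
 * access at pos = MTTY_VFIO_PCI_INDEX_TO_OFFSET(7) + 0x10 = 0x70000000010
 * decodes to index 7 (config space) and offset 0x10 (the BAR0 register);
 * the low 40 bits selected by MTTY_VFIO_PCI_OFFSET_MASK carry the offset
 * within that region.
 */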
65 #define MAX_MTTYS       24
66
67 /*
68  * Global Structures
69  */
70
71 static struct mtty_dev {
72         dev_t           vd_devt;
73         struct class    *vd_class;
74         struct cdev     vd_cdev;
75         struct idr      vd_idr;
76         struct device   dev;
77 } mtty_dev;
78
79 struct mdev_region_info {
80         u64 start;
81         u64 phys_start;
82         u32 size;
83         u64 vfio_offset;
84 };
85
86 #if defined(DEBUG_REGS)
87 static const char *wr_reg[] = {
88         "TX",
89         "IER",
90         "FCR",
91         "LCR",
92         "MCR",
93         "LSR",
94         "MSR",
95         "SCR"
96 };
97
98 static const char *rd_reg[] = {
99         "RX",
100         "IER",
101         "IIR",
102         "LCR",
103         "MCR",
104         "LSR",
105         "MSR",
106         "SCR"
107 };
108 #endif
109
110 /* loop back buffer */
111 struct rxtx {
112         u8 fifo[MAX_FIFO_SIZE];
113         u8 head, tail;
114         u8 count;
115 };
116
117 struct serial_port {
118         u8 uart_reg[8];         /* 8 registers */
119         struct rxtx rxtx;       /* loop back buffer */
120         bool dlab;
121         bool overrun;
122         u16 divisor;
123         u8 fcr;                 /* FIFO control register */
124         u8 max_fifo_size;
125         u8 intr_trigger_level;  /* interrupt trigger level */
126 };
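/*
 * Each emulated port is a simple 16550-style loopback: bytes the guest writes
 * to the TX register are pushed into rxtx.fifo and handed back on reads of
 * the RX register, so the guest effectively talks to itself.  When the DLAB
 * bit is set in LCR, accesses to offsets 0 and 1 are redirected to the
 * divisor latch instead of TX/RX and IER.
 */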
127
128 /* State of each mdev device */
129 struct mdev_state {
130         int irq_fd;
131         struct eventfd_ctx *intx_evtfd;
132         struct eventfd_ctx *msi_evtfd;
133         int irq_index;
134         u8 *vconfig;
135         struct mutex ops_lock;
136         struct mdev_device *mdev;
137         struct mdev_region_info region_info[VFIO_PCI_NUM_REGIONS];
138         u32 bar_mask[VFIO_PCI_NUM_REGIONS];
139         struct list_head next;
140         struct serial_port s[2];
141         struct mutex rxtx_lock;
142         struct vfio_device_info dev_info;
143         int nr_ports;
144 };
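/*
 * Locking: ops_lock serializes mdev_access(), the region/IRQ info paths and
 * mtty_set_irqs(); rxtx_lock protects the per-port loopback FIFO state that
 * handle_bar_read()/handle_bar_write() touch.
 */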
145
146 static struct mutex mdev_list_lock;
147 static struct list_head mdev_devices_list;
148
149 static const struct file_operations vd_fops = {
150         .owner          = THIS_MODULE,
151 };
152
153 /* function prototypes */
154
155 static int mtty_trigger_interrupt(struct mdev_state *mdev_state);
156
157 /* Helper functions */
158
159 static void dump_buffer(u8 *buf, uint32_t count)
160 {
161 #if defined(DEBUG)
162         int i;
163
164         pr_info("Buffer:\n");
165         for (i = 0; i < count; i++) {
166                 pr_info("%2x ", *(buf + i));
167                 if ((i + 1) % 16 == 0)
168                         pr_info("\n");
169         }
170 #endif
171 }
172
173 static void mtty_create_config_space(struct mdev_state *mdev_state)
174 {
175         /* PCI dev ID */
176         STORE_LE32((u32 *) &mdev_state->vconfig[0x0], 0x32534348);
177
178         /* Control: I/O+, Mem-, BusMaster- */
179         STORE_LE16((u16 *) &mdev_state->vconfig[0x4], 0x0001);
180
181         /* Status: capabilities list absent */
182         STORE_LE16((u16 *) &mdev_state->vconfig[0x6], 0x0200);
183
184         /* Rev ID */
185         mdev_state->vconfig[0x8] =  0x10;
186
187         /* programming interface class : 16550-compatible serial controller */
188         mdev_state->vconfig[0x9] =  0x02;
189
190         /* Sub class : 00 */
191         mdev_state->vconfig[0xa] =  0x00;
192
193         /* Base class : Simple Communication controllers */
194         mdev_state->vconfig[0xb] =  0x07;
195
196         /* base address registers */
197         /* BAR0: IO space */
198         STORE_LE32((u32 *) &mdev_state->vconfig[0x10], 0x000001);
199         mdev_state->bar_mask[0] = ~(MTTY_IO_BAR_SIZE) + 1;
200
201         if (mdev_state->nr_ports == 2) {
202                 /* BAR1: IO space */
203                 STORE_LE32((u32 *) &mdev_state->vconfig[0x14], 0x000001);
204                 mdev_state->bar_mask[1] = ~(MTTY_IO_BAR_SIZE) + 1;
205         }
206
207         /* Subsystem ID */
208         STORE_LE32((u32 *) &mdev_state->vconfig[0x2c], 0x32534348);
209
210         mdev_state->vconfig[0x34] =  0x00;   /* Cap Ptr */
211         mdev_state->vconfig[0x3d] =  0x01;   /* interrupt pin (INTA#) */
212
213         /* Vendor specific data */
214         mdev_state->vconfig[0x40] =  0x23;
215         mdev_state->vconfig[0x43] =  0x80;
216         mdev_state->vconfig[0x44] =  0x23;
217         mdev_state->vconfig[0x48] =  0x23;
218         mdev_state->vconfig[0x4c] =  0x23;
219
220         mdev_state->vconfig[0x60] =  0x50;
221         mdev_state->vconfig[0x61] =  0x43;
222         mdev_state->vconfig[0x62] =  0x49;
223         mdev_state->vconfig[0x63] =  0x20;
224         mdev_state->vconfig[0x64] =  0x53;
225         mdev_state->vconfig[0x65] =  0x65;
226         mdev_state->vconfig[0x66] =  0x72;
227         mdev_state->vconfig[0x67] =  0x69;
228         mdev_state->vconfig[0x68] =  0x61;
229         mdev_state->vconfig[0x69] =  0x6c;
230         mdev_state->vconfig[0x6a] =  0x2f;
231         mdev_state->vconfig[0x6b] =  0x55;
232         mdev_state->vconfig[0x6c] =  0x41;
233         mdev_state->vconfig[0x6d] =  0x52;
234         mdev_state->vconfig[0x6e] =  0x54;
235 }
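/*
 * For reference, the header built above decodes (little-endian) to vendor ID
 * 0x4348, device ID 0x3253, class 07/00/02 (16550-compatible serial
 * controller), an I/O BAR0 (plus BAR1 for the dual-port type), interrupt pin
 * INTA#, and vendor-specific bytes at 0x60-0x6e spelling out the ASCII
 * string "PCI Serial/UART".
 */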
236
237 static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset,
238                                  u8 *buf, u32 count)
239 {
240         u32 cfg_addr, bar_mask, bar_index = 0;
241
242         switch (offset) {
243         case 0x04: /* device control */
244         case 0x06: /* device status */
245                 /* do nothing */
246                 break;
247         case 0x3c:  /* interrupt line */
248                 mdev_state->vconfig[0x3c] = buf[0];
249                 break;
250         case 0x3d:
251                 /*
252                  * Interrupt Pin is hardwired to INTA.
253                  * This field is write protected by hardware
254                  */
255                 break;
256         case 0x10:  /* BAR0 */
257         case 0x14:  /* BAR1 */
258                 if (offset == 0x10)
259                         bar_index = 0;
260                 else if (offset == 0x14)
261                         bar_index = 1;
262
263                 if ((mdev_state->nr_ports == 1) && (bar_index == 1)) {
264                         STORE_LE32(&mdev_state->vconfig[offset], 0);
265                         break;
266                 }
267
268                 cfg_addr = *(u32 *)buf;
269                 pr_info("BAR%d addr 0x%x\n", bar_index, cfg_addr);
270
271                 if (cfg_addr == 0xffffffff) {
272                         bar_mask = mdev_state->bar_mask[bar_index];
273                         cfg_addr = (cfg_addr & bar_mask);
274                 }
275
276                 cfg_addr |= (mdev_state->vconfig[offset] & 0x3ul);
277                 STORE_LE32(&mdev_state->vconfig[offset], cfg_addr);
278                 break;
279         case 0x18:  /* BAR2 */
280         case 0x1c:  /* BAR3 */
281         case 0x20:  /* BAR4 */
282                 STORE_LE32(&mdev_state->vconfig[offset], 0);
283                 break;
284         default:
285                 pr_info("PCI config write @0x%x of %d bytes not handled\n",
286                         offset, count);
287                 break;
288         }
289 }
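/*
 * BAR sizing example: bar_mask[0] is ~(MTTY_IO_BAR_SIZE) + 1 = 0xfffffff8, so
 * when the guest writes 0xffffffff to BAR0 it reads back 0xfffffff9 (the size
 * mask plus the I/O space bit) and computes an 8-byte I/O BAR, matching
 * MTTY_IO_BAR_SIZE.
 */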
290
291 static void handle_bar_write(unsigned int index, struct mdev_state *mdev_state,
292                                 u16 offset, u8 *buf, u32 count)
293 {
294         u8 data = *buf;
295
296         /* Handle data written by guest */
297         switch (offset) {
298         case UART_TX:
299                 /* if DLAB set, data is LSB of divisor */
300                 if (mdev_state->s[index].dlab) {
301                         mdev_state->s[index].divisor |= data;
302                         break;
303                 }
304
305                 mutex_lock(&mdev_state->rxtx_lock);
306
307                 /* save in TX buffer */
308                 if (mdev_state->s[index].rxtx.count <
309                                 mdev_state->s[index].max_fifo_size) {
310                         mdev_state->s[index].rxtx.fifo[
311                                         mdev_state->s[index].rxtx.head] = data;
312                         mdev_state->s[index].rxtx.count++;
313                         CIRCULAR_BUF_INC_IDX(mdev_state->s[index].rxtx.head);
314                         mdev_state->s[index].overrun = false;
315
316                         /*
317                          * Trigger interrupt if receive data interrupt is
318                          * enabled and fifo reached trigger level
319                          */
320                         if ((mdev_state->s[index].uart_reg[UART_IER] &
321                                                 UART_IER_RDI) &&
322                            (mdev_state->s[index].rxtx.count ==
323                                     mdev_state->s[index].intr_trigger_level)) {
324                                 /* trigger interrupt */
325 #if defined(DEBUG_INTR)
326                                 pr_err("Serial port %d: Fifo level trigger\n",
327                                         index);
328 #endif
329                                 mtty_trigger_interrupt(mdev_state);
330                         }
331                 } else {
332 #if defined(DEBUG_INTR)
333                         pr_err("Serial port %d: Buffer Overflow\n", index);
334 #endif
335                         mdev_state->s[index].overrun = true;
336
337                         /*
338                          * Trigger interrupt if receiver line status interrupt
339                          * is enabled
340                          */
341                         if (mdev_state->s[index].uart_reg[UART_IER] &
342                                                                 UART_IER_RLSI)
343                                 mtty_trigger_interrupt(mdev_state);
344                 }
345                 mutex_unlock(&mdev_state->rxtx_lock);
346                 break;
347
348         case UART_IER:
349                 /* if DLAB set, data is MSB of divisor */
350                 if (mdev_state->s[index].dlab)
351                         mdev_state->s[index].divisor |= (u16)data << 8;
352                 else {
353                         mdev_state->s[index].uart_reg[offset] = data;
354                         mutex_lock(&mdev_state->rxtx_lock);
355                         if ((data & UART_IER_THRI) &&
356                             (mdev_state->s[index].rxtx.head ==
357                                         mdev_state->s[index].rxtx.tail)) {
358 #if defined(DEBUG_INTR)
359                                 pr_err("Serial port %d: IER_THRI write\n",
360                                         index);
361 #endif
362                                 mtty_trigger_interrupt(mdev_state);
363                         }
364
365                         mutex_unlock(&mdev_state->rxtx_lock);
366                 }
367
368                 break;
369
370         case UART_FCR:
371                 mdev_state->s[index].fcr = data;
372
373                 mutex_lock(&mdev_state->rxtx_lock);
374                 if (data & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT)) {
375                         /* clear loop back FIFO */
376                         mdev_state->s[index].rxtx.count = 0;
377                         mdev_state->s[index].rxtx.head = 0;
378                         mdev_state->s[index].rxtx.tail = 0;
379                 }
380                 mutex_unlock(&mdev_state->rxtx_lock);
381
382                 switch (data & UART_FCR_TRIGGER_MASK) {
383                 case UART_FCR_TRIGGER_1:
384                         mdev_state->s[index].intr_trigger_level = 1;
385                         break;
386
387                 case UART_FCR_TRIGGER_4:
388                         mdev_state->s[index].intr_trigger_level = 4;
389                         break;
390
391                 case UART_FCR_TRIGGER_8:
392                         mdev_state->s[index].intr_trigger_level = 8;
393                         break;
394
395                 case UART_FCR_TRIGGER_14:
396                         mdev_state->s[index].intr_trigger_level = 14;
397                         break;
398                 }
399
400                 /*
401                  * Otherwise set the trigger level to 1, or implement a timer with
402                  * a timeout of 4 characters and, when that timer expires, set the
403                  * Receive data timeout in the IIR register
404                  */
405                 mdev_state->s[index].intr_trigger_level = 1;
406                 if (data & UART_FCR_ENABLE_FIFO)
407                         mdev_state->s[index].max_fifo_size = MAX_FIFO_SIZE;
408                 else {
409                         mdev_state->s[index].max_fifo_size = 1;
410                         mdev_state->s[index].intr_trigger_level = 1;
411                 }
412
413                 break;
414
415         case UART_LCR:
416                 if (data & UART_LCR_DLAB) {
417                         mdev_state->s[index].dlab = true;
418                         mdev_state->s[index].divisor = 0;
419                 } else
420                         mdev_state->s[index].dlab = false;
421
422                 mdev_state->s[index].uart_reg[offset] = data;
423                 break;
424
425         case UART_MCR:
426                 mdev_state->s[index].uart_reg[offset] = data;
427
428                 if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) &&
429                                 (data & UART_MCR_OUT2)) {
430 #if defined(DEBUG_INTR)
431                         pr_err("Serial port %d: MCR_OUT2 write\n", index);
432 #endif
433                         mtty_trigger_interrupt(mdev_state);
434                 }
435
436                 if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) &&
437                                 (data & (UART_MCR_RTS | UART_MCR_DTR))) {
438 #if defined(DEBUG_INTR)
439                         pr_err("Serial port %d: MCR RTS/DTR write\n", index);
440 #endif
441                         mtty_trigger_interrupt(mdev_state);
442                 }
443                 break;
444
445         case UART_LSR:
446         case UART_MSR:
447                 /* do nothing */
448                 break;
449
450         case UART_SCR:
451                 mdev_state->s[index].uart_reg[offset] = data;
452                 break;
453
454         default:
455                 break;
456         }
457 }
458
459 static void handle_bar_read(unsigned int index, struct mdev_state *mdev_state,
460                             u16 offset, u8 *buf, u32 count)
461 {
462         /* Handle read requests by guest */
463         switch (offset) {
464         case UART_RX:
465                 /* if DLAB set, data is LSB of divisor */
466                 if (mdev_state->s[index].dlab) {
467                         *buf  = (u8)mdev_state->s[index].divisor;
468                         break;
469                 }
470
471                 mutex_lock(&mdev_state->rxtx_lock);
472                 /* return data in tx buffer */
473                 if (mdev_state->s[index].rxtx.head !=
474                                  mdev_state->s[index].rxtx.tail) {
475                         *buf = mdev_state->s[index].rxtx.fifo[
476                                                 mdev_state->s[index].rxtx.tail];
477                         mdev_state->s[index].rxtx.count--;
478                         CIRCULAR_BUF_INC_IDX(mdev_state->s[index].rxtx.tail);
479                 }
480
481                 if (mdev_state->s[index].rxtx.head ==
482                                 mdev_state->s[index].rxtx.tail) {
483                         /*
484                          * Trigger interrupt if tx buffer empty interrupt is
485                          * enabled and fifo is empty
486                          */
487 #if defined(DEBUG_INTR)
488                         pr_err("Serial port %d: Buffer Empty\n", index);
489 #endif
490                         if (mdev_state->s[index].uart_reg[UART_IER] &
491                                                          UART_IER_THRI)
492                                 mtty_trigger_interrupt(mdev_state);
493                 }
494                 mutex_unlock(&mdev_state->rxtx_lock);
495
496                 break;
497
498         case UART_IER:
499                 if (mdev_state->s[index].dlab) {
500                         *buf = (u8)(mdev_state->s[index].divisor >> 8);
501                         break;
502                 }
503                 *buf = mdev_state->s[index].uart_reg[offset] & 0x0f;
504                 break;
505
506         case UART_IIR:
507         {
508                 u8 ier = mdev_state->s[index].uart_reg[UART_IER];
509                 *buf = 0;
510
511                 mutex_lock(&mdev_state->rxtx_lock);
512                 /* Interrupt priority 1: Parity, overrun, framing or break */
513                 if ((ier & UART_IER_RLSI) && mdev_state->s[index].overrun)
514                         *buf |= UART_IIR_RLSI;
515
516                 /* Interrupt priority 2: Fifo trigger level reached */
517                 if ((ier & UART_IER_RDI) &&
518                     (mdev_state->s[index].rxtx.count >=
519                       mdev_state->s[index].intr_trigger_level))
520                         *buf |= UART_IIR_RDI;
521
522                 /* Interrupt priority 3: transmitter holding register empty */
523                 if ((ier & UART_IER_THRI) &&
524                     (mdev_state->s[index].rxtx.head ==
525                                 mdev_state->s[index].rxtx.tail))
526                         *buf |= UART_IIR_THRI;
527
528                 /* Interrupt priority 4: Modem status: CTS, DSR, RI or DCD */
529                 if ((ier & UART_IER_MSI) &&
530                     (mdev_state->s[index].uart_reg[UART_MCR] &
531                                  (UART_MCR_RTS | UART_MCR_DTR)))
532                         *buf |= UART_IIR_MSI;
533
534                 /* bit0: 0=> interrupt pending, 1=> no interrupt is pending */
535                 if (*buf == 0)
536                         *buf = UART_IIR_NO_INT;
537
538                 /* set bit 6 & 7 to be 16550 compatible */
539                 *buf |= 0xC0;
540                 mutex_unlock(&mdev_state->rxtx_lock);
541         }
542         break;
543
544         case UART_LCR:
545         case UART_MCR:
546                 *buf = mdev_state->s[index].uart_reg[offset];
547                 break;
548
549         case UART_LSR:
550         {
551                 u8 lsr = 0;
552
553                 mutex_lock(&mdev_state->rxtx_lock);
554                 /* at least one char in FIFO */
555                 if (mdev_state->s[index].rxtx.head !=
556                                  mdev_state->s[index].rxtx.tail)
557                         lsr |= UART_LSR_DR;
558
559                 /* if FIFO overrun */
560                 if (mdev_state->s[index].overrun)
561                         lsr |= UART_LSR_OE;
562
563                 /* transmit FIFO empty and transmitter empty */
564                 if (mdev_state->s[index].rxtx.head ==
565                                  mdev_state->s[index].rxtx.tail)
566                         lsr |= UART_LSR_TEMT | UART_LSR_THRE;
567
568                 mutex_unlock(&mdev_state->rxtx_lock);
569                 *buf = lsr;
570                 break;
571         }
572         case UART_MSR:
573                 *buf = UART_MSR_DSR | UART_MSR_DDSR | UART_MSR_DCD;
574
575                 mutex_lock(&mdev_state->rxtx_lock);
576                 /* if AFE is 1 and the FIFO has space, set CTS bit */
577                 if (mdev_state->s[index].uart_reg[UART_MCR] &
578                                                  UART_MCR_AFE) {
579                         if (mdev_state->s[index].rxtx.count <
580                                         mdev_state->s[index].max_fifo_size)
581                                 *buf |= UART_MSR_CTS | UART_MSR_DCTS;
582                 } else
583                         *buf |= UART_MSR_CTS | UART_MSR_DCTS;
584                 mutex_unlock(&mdev_state->rxtx_lock);
585
586                 break;
587
588         case UART_SCR:
589                 *buf = mdev_state->s[index].uart_reg[offset];
590                 break;
591
592         default:
593                 break;
594         }
595 }
596
597 static void mdev_read_base(struct mdev_state *mdev_state)
598 {
599         int index, pos;
600         u32 start_lo, start_hi;
601         u32 mem_type;
602
603         pos = PCI_BASE_ADDRESS_0;
604
605         for (index = 0; index <= VFIO_PCI_BAR5_REGION_INDEX; index++) {
606
607                 if (!mdev_state->region_info[index].size)
608                         continue;
609
610                 start_lo = (*(u32 *)(mdev_state->vconfig + pos)) &
611                         PCI_BASE_ADDRESS_MEM_MASK;
612                 mem_type = (*(u32 *)(mdev_state->vconfig + pos)) &
613                         PCI_BASE_ADDRESS_MEM_TYPE_MASK;
614
615                 switch (mem_type) {
616                 case PCI_BASE_ADDRESS_MEM_TYPE_64:
617                         start_hi = (*(u32 *)(mdev_state->vconfig + pos + 4));
618                         pos += 4;
619                         break;
620                 case PCI_BASE_ADDRESS_MEM_TYPE_32:
621                 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
622                         /* 1M mem BAR treated as 32-bit BAR */
623                 default:
624                         /* mem unknown type treated as 32-bit BAR */
625                         start_hi = 0;
626                         break;
627                 }
628                 pos += 4;
629                 mdev_state->region_info[index].start = ((u64)start_hi << 32) |
630                                                         start_lo;
631         }
632 }
633
634 static ssize_t mdev_access(struct mdev_device *mdev, u8 *buf, size_t count,
635                            loff_t pos, bool is_write)
636 {
637         struct mdev_state *mdev_state;
638         unsigned int index;
639         loff_t offset;
640         int ret = 0;
641
642         if (!mdev || !buf)
643                 return -EINVAL;
644
645         mdev_state = mdev_get_drvdata(mdev);
646         if (!mdev_state) {
647                 pr_err("%s mdev_state not found\n", __func__);
648                 return -EINVAL;
649         }
650
651         mutex_lock(&mdev_state->ops_lock);
652
653         index = MTTY_VFIO_PCI_OFFSET_TO_INDEX(pos);
654         offset = pos & MTTY_VFIO_PCI_OFFSET_MASK;
655         switch (index) {
656         case VFIO_PCI_CONFIG_REGION_INDEX:
657
658 #if defined(DEBUG)
659                 pr_info("%s: PCI config space %s at offset 0x%llx\n",
660                          __func__, is_write ? "write" : "read", offset);
661 #endif
662                 if (is_write) {
663                         dump_buffer(buf, count);
664                         handle_pci_cfg_write(mdev_state, offset, buf, count);
665                 } else {
666                         memcpy(buf, (mdev_state->vconfig + offset), count);
667                         dump_buffer(buf, count);
668                 }
669
670                 break;
671
672         case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
673                 if (!mdev_state->region_info[index].start)
674                         mdev_read_base(mdev_state);
675
676                 if (is_write) {
677                         dump_buffer(buf, count);
678
679 #if defined(DEBUG_REGS)
680                         pr_info("%s: BAR%d  WR @0x%llx %s val:0x%02x dlab:%d\n",
681                                 __func__, index, offset, wr_reg[offset],
682                                 *buf, mdev_state->s[index].dlab);
683 #endif
684                         handle_bar_write(index, mdev_state, offset, buf, count);
685                 } else {
686                         handle_bar_read(index, mdev_state, offset, buf, count);
687                         dump_buffer(buf, count);
688
689 #if defined(DEBUG_REGS)
690                         pr_info("%s: BAR%d  RD @0x%llx %s val:0x%02x dlab:%d\n",
691                                 __func__, index, offset, rd_reg[offset],
692                                 *buf, mdev_state->s[index].dlab);
693 #endif
694                 }
695                 break;
696
697         default:
698                 ret = -1;
699                 goto accessfailed;
700         }
701
702         ret = count;
703
704
705 accessfailed:
706         mutex_unlock(&mdev_state->ops_lock);
707
708         return ret;
709 }
710
711 static int mtty_create(struct mdev_device *mdev)
712 {
713         struct mdev_state *mdev_state;
714         int nr_ports = mdev_get_type_group_id(mdev) + 1;
715
716         mdev_state = kzalloc(sizeof(struct mdev_state), GFP_KERNEL);
717         if (mdev_state == NULL)
718                 return -ENOMEM;
719
720         mdev_state->nr_ports = nr_ports;
721         mdev_state->irq_index = -1;
722         mdev_state->s[0].max_fifo_size = MAX_FIFO_SIZE;
723         mdev_state->s[1].max_fifo_size = MAX_FIFO_SIZE;
724         mutex_init(&mdev_state->rxtx_lock);
725         mdev_state->vconfig = kzalloc(MTTY_CONFIG_SPACE_SIZE, GFP_KERNEL);
726
727         if (mdev_state->vconfig == NULL) {
728                 kfree(mdev_state);
729                 return -ENOMEM;
730         }
731
732         mutex_init(&mdev_state->ops_lock);
733         mdev_state->mdev = mdev;
734         mdev_set_drvdata(mdev, mdev_state);
735
736         mtty_create_config_space(mdev_state);
737
738         mutex_lock(&mdev_list_lock);
739         list_add(&mdev_state->next, &mdev_devices_list);
740         mutex_unlock(&mdev_list_lock);
741
742         return 0;
743 }
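/*
 * A minimal usage sketch (paths per the vfio-mediated-device documentation;
 * the UUID below is only an example): after loading this module, an instance
 * of the dual-port type can be created and handed to a VMM roughly like
 *
 *   uuidgen
 *   echo "83b8f4f2-509f-382f-3c1e-e6bfe0fa1001" > \
 *       /sys/devices/virtual/mtty/mtty/mdev_supported_types/mtty-2/create
 *   qemu-system-x86_64 ... \
 *       -device vfio-pci,sysfsdev=/sys/bus/mdev/devices/83b8f4f2-509f-382f-3c1e-e6bfe0fa1001
 *
 * which triggers mtty_create() above with nr_ports == 2.
 */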
744
745 static int mtty_remove(struct mdev_device *mdev)
746 {
747         struct mdev_state *mds, *tmp_mds;
748         struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
749         int ret = -EINVAL;
750
751         mutex_lock(&mdev_list_lock);
752         list_for_each_entry_safe(mds, tmp_mds, &mdev_devices_list, next) {
753                 if (mdev_state == mds) {
754                         list_del(&mdev_state->next);
755                         mdev_set_drvdata(mdev, NULL);
756                         kfree(mdev_state->vconfig);
757                         kfree(mdev_state);
758                         ret = 0;
759                         break;
760                 }
761         }
762         mutex_unlock(&mdev_list_lock);
763
764         return ret;
765 }
766
767 static int mtty_reset(struct mdev_device *mdev)
768 {
769         struct mdev_state *mdev_state;
770
771         if (!mdev)
772                 return -EINVAL;
773
774         mdev_state = mdev_get_drvdata(mdev);
775         if (!mdev_state)
776                 return -EINVAL;
777
778         pr_info("%s: called\n", __func__);
779
780         return 0;
781 }
782
783 static ssize_t mtty_read(struct mdev_device *mdev, char __user *buf,
784                          size_t count, loff_t *ppos)
785 {
786         unsigned int done = 0;
787         int ret;
788
789         while (count) {
790                 size_t filled;
791
792                 if (count >= 4 && !(*ppos % 4)) {
793                         u32 val;
794
795                         ret =  mdev_access(mdev, (u8 *)&val, sizeof(val),
796                                            *ppos, false);
797                         if (ret <= 0)
798                                 goto read_err;
799
800                         if (copy_to_user(buf, &val, sizeof(val)))
801                                 goto read_err;
802
803                         filled = 4;
804                 } else if (count >= 2 && !(*ppos % 2)) {
805                         u16 val;
806
807                         ret = mdev_access(mdev, (u8 *)&val, sizeof(val),
808                                           *ppos, false);
809                         if (ret <= 0)
810                                 goto read_err;
811
812                         if (copy_to_user(buf, &val, sizeof(val)))
813                                 goto read_err;
814
815                         filled = 2;
816                 } else {
817                         u8 val;
818
819                         ret = mdev_access(mdev, (u8 *)&val, sizeof(val),
820                                           *ppos, false);
821                         if (ret <= 0)
822                                 goto read_err;
823
824                         if (copy_to_user(buf, &val, sizeof(val)))
825                                 goto read_err;
826
827                         filled = 1;
828                 }
829
830                 count -= filled;
831                 done += filled;
832                 *ppos += filled;
833                 buf += filled;
834         }
835
836         return done;
837
838 read_err:
839         return -EFAULT;
840 }
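/*
 * The loop above splits a request into naturally aligned 4-, 2- and 1-byte
 * accesses.  For example, an 8-byte read starting at *ppos == 3 is issued as
 * 1 + 4 + 2 + 1 byte accesses; mtty_write() below uses the same scheme in the
 * opposite direction.
 */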
841
842 static ssize_t mtty_write(struct mdev_device *mdev, const char __user *buf,
843                    size_t count, loff_t *ppos)
844 {
845         unsigned int done = 0;
846         int ret;
847
848         while (count) {
849                 size_t filled;
850
851                 if (count >= 4 && !(*ppos % 4)) {
852                         u32 val;
853
854                         if (copy_from_user(&val, buf, sizeof(val)))
855                                 goto write_err;
856
857                         ret = mdev_access(mdev, (u8 *)&val, sizeof(val),
858                                           *ppos, true);
859                         if (ret <= 0)
860                                 goto write_err;
861
862                         filled = 4;
863                 } else if (count >= 2 && !(*ppos % 2)) {
864                         u16 val;
865
866                         if (copy_from_user(&val, buf, sizeof(val)))
867                                 goto write_err;
868
869                         ret = mdev_access(mdev, (u8 *)&val, sizeof(val),
870                                           *ppos, true);
871                         if (ret <= 0)
872                                 goto write_err;
873
874                         filled = 2;
875                 } else {
876                         u8 val;
877
878                         if (copy_from_user(&val, buf, sizeof(val)))
879                                 goto write_err;
880
881                         ret = mdev_access(mdev, (u8 *)&val, sizeof(val),
882                                           *ppos, true);
883                         if (ret <= 0)
884                                 goto write_err;
885
886                         filled = 1;
887                 }
888                 count -= filled;
889                 done += filled;
890                 *ppos += filled;
891                 buf += filled;
892         }
893
894         return done;
895 write_err:
896         return -EFAULT;
897 }
898
899 static int mtty_set_irqs(struct mdev_device *mdev, uint32_t flags,
900                          unsigned int index, unsigned int start,
901                          unsigned int count, void *data)
902 {
903         int ret = 0;
904         struct mdev_state *mdev_state;
905
906         if (!mdev)
907                 return -EINVAL;
908
909         mdev_state = mdev_get_drvdata(mdev);
910         if (!mdev_state)
911                 return -EINVAL;
912
913         mutex_lock(&mdev_state->ops_lock);
914         switch (index) {
915         case VFIO_PCI_INTX_IRQ_INDEX:
916                 switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
917                 case VFIO_IRQ_SET_ACTION_MASK:
918                 case VFIO_IRQ_SET_ACTION_UNMASK:
919                         break;
920                 case VFIO_IRQ_SET_ACTION_TRIGGER:
921                 {
922                         if (flags & VFIO_IRQ_SET_DATA_NONE) {
923                                 pr_info("%s: disable INTx\n", __func__);
924                                 if (mdev_state->intx_evtfd)
925                                         eventfd_ctx_put(mdev_state->intx_evtfd);
926                                 break;
927                         }
928
929                         if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
930                                 int fd = *(int *)data;
931
932                                 if (fd > 0) {
933                                         struct eventfd_ctx *evt;
934
935                                         evt = eventfd_ctx_fdget(fd);
936                                         if (IS_ERR(evt)) {
937                                                 ret = PTR_ERR(evt);
938                                                 break;
939                                         }
940                                         mdev_state->intx_evtfd = evt;
941                                         mdev_state->irq_fd = fd;
942                                         mdev_state->irq_index = index;
943                                         break;
944                                 }
945                         }
946                         break;
947                 }
948                 }
949                 break;
950         case VFIO_PCI_MSI_IRQ_INDEX:
951                 switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
952                 case VFIO_IRQ_SET_ACTION_MASK:
953                 case VFIO_IRQ_SET_ACTION_UNMASK:
954                         break;
955                 case VFIO_IRQ_SET_ACTION_TRIGGER:
956                         if (flags & VFIO_IRQ_SET_DATA_NONE) {
957                                 if (mdev_state->msi_evtfd)
958                                         eventfd_ctx_put(mdev_state->msi_evtfd);
959                                 pr_info("%s: disable MSI\n", __func__);
960                                 mdev_state->irq_index = VFIO_PCI_INTX_IRQ_INDEX;
961                                 break;
962                         }
963                         if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
964                                 int fd = *(int *)data;
965                                 struct eventfd_ctx *evt;
966
967                                 if (fd <= 0)
968                                         break;
969
970                                 if (mdev_state->msi_evtfd)
971                                         break;
972
973                                 evt = eventfd_ctx_fdget(fd);
974                                 if (IS_ERR(evt)) {
975                                         ret = PTR_ERR(evt);
976                                         break;
977                                 }
978                                 mdev_state->msi_evtfd = evt;
979                                 mdev_state->irq_fd = fd;
980                                 mdev_state->irq_index = index;
981                         }
982                         break;
983         }
984         break;
985         case VFIO_PCI_MSIX_IRQ_INDEX:
986                 pr_info("%s: MSIX_IRQ\n", __func__);
987                 break;
988         case VFIO_PCI_ERR_IRQ_INDEX:
989                 pr_info("%s: ERR_IRQ\n", __func__);
990                 break;
991         case VFIO_PCI_REQ_IRQ_INDEX:
992                 pr_info("%s: REQ_IRQ\n", __func__);
993                 break;
994         }
995
996         mutex_unlock(&mdev_state->ops_lock);
997         return ret;
998 }
999
1000 static int mtty_trigger_interrupt(struct mdev_state *mdev_state)
1001 {
1002         int ret = -1;
1003
1004         if ((mdev_state->irq_index == VFIO_PCI_MSI_IRQ_INDEX) &&
1005             (!mdev_state->msi_evtfd))
1006                 return -EINVAL;
1007         else if ((mdev_state->irq_index == VFIO_PCI_INTX_IRQ_INDEX) &&
1008                  (!mdev_state->intx_evtfd)) {
1009                 pr_info("%s: Intr eventfd not found\n", __func__);
1010                 return -EINVAL;
1011         }
1012
1013         if (mdev_state->irq_index == VFIO_PCI_MSI_IRQ_INDEX)
1014                 ret = eventfd_signal(mdev_state->msi_evtfd, 1);
1015         else
1016                 ret = eventfd_signal(mdev_state->intx_evtfd, 1);
1017
1018 #if defined(DEBUG_INTR)
1019         pr_info("Interrupt triggered\n");
1020 #endif
1021         if (ret != 1)
1022                 pr_err("%s: eventfd signal failed (%d)\n", __func__, ret);
1023
1024         return ret;
1025 }
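/*
 * Interrupts are delivered by signalling the eventfd that userspace
 * registered through VFIO_DEVICE_SET_IRQS (see mtty_set_irqs() above):
 * msi_evtfd when MSI is configured, otherwise intx_evtfd for INTx.
 */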
1026
1027 static int mtty_get_region_info(struct mdev_device *mdev,
1028                          struct vfio_region_info *region_info,
1029                          u16 *cap_type_id, void **cap_type)
1030 {
1031         unsigned int size = 0;
1032         struct mdev_state *mdev_state;
1033         u32 bar_index;
1034
1035         if (!mdev)
1036                 return -EINVAL;
1037
1038         mdev_state = mdev_get_drvdata(mdev);
1039         if (!mdev_state)
1040                 return -EINVAL;
1041
1042         bar_index = region_info->index;
1043         if (bar_index >= VFIO_PCI_NUM_REGIONS)
1044                 return -EINVAL;
1045
1046         mutex_lock(&mdev_state->ops_lock);
1047
1048         switch (bar_index) {
1049         case VFIO_PCI_CONFIG_REGION_INDEX:
1050                 size = MTTY_CONFIG_SPACE_SIZE;
1051                 break;
1052         case VFIO_PCI_BAR0_REGION_INDEX:
1053                 size = MTTY_IO_BAR_SIZE;
1054                 break;
1055         case VFIO_PCI_BAR1_REGION_INDEX:
1056                 if (mdev_state->nr_ports == 2)
1057                         size = MTTY_IO_BAR_SIZE;
1058                 break;
1059         default:
1060                 size = 0;
1061                 break;
1062         }
1063
1064         mdev_state->region_info[bar_index].size = size;
1065         mdev_state->region_info[bar_index].vfio_offset =
1066                 MTTY_VFIO_PCI_INDEX_TO_OFFSET(bar_index);
1067
1068         region_info->size = size;
1069         region_info->offset = MTTY_VFIO_PCI_INDEX_TO_OFFSET(bar_index);
1070         region_info->flags = VFIO_REGION_INFO_FLAG_READ |
1071                 VFIO_REGION_INFO_FLAG_WRITE;
1072         mutex_unlock(&mdev_state->ops_lock);
1073         return 0;
1074 }
1075
1076 static int mtty_get_irq_info(struct mdev_device *mdev,
1077                              struct vfio_irq_info *irq_info)
1078 {
1079         switch (irq_info->index) {
1080         case VFIO_PCI_INTX_IRQ_INDEX:
1081         case VFIO_PCI_MSI_IRQ_INDEX:
1082         case VFIO_PCI_REQ_IRQ_INDEX:
1083                 break;
1084
1085         default:
1086                 return -EINVAL;
1087         }
1088
1089         irq_info->flags = VFIO_IRQ_INFO_EVENTFD;
1090         irq_info->count = 1;
1091
1092         if (irq_info->index == VFIO_PCI_INTX_IRQ_INDEX)
1093                 irq_info->flags |= (VFIO_IRQ_INFO_MASKABLE |
1094                                 VFIO_IRQ_INFO_AUTOMASKED);
1095         else
1096                 irq_info->flags |= VFIO_IRQ_INFO_NORESIZE;
1097
1098         return 0;
1099 }
1100
1101 static int mtty_get_device_info(struct mdev_device *mdev,
1102                          struct vfio_device_info *dev_info)
1103 {
1104         dev_info->flags = VFIO_DEVICE_FLAGS_PCI;
1105         dev_info->num_regions = VFIO_PCI_NUM_REGIONS;
1106         dev_info->num_irqs = VFIO_PCI_NUM_IRQS;
1107
1108         return 0;
1109 }
1110
1111 static long mtty_ioctl(struct mdev_device *mdev, unsigned int cmd,
1112                         unsigned long arg)
1113 {
1114         int ret = 0;
1115         unsigned long minsz;
1116         struct mdev_state *mdev_state;
1117
1118         if (!mdev)
1119                 return -EINVAL;
1120
1121         mdev_state = mdev_get_drvdata(mdev);
1122         if (!mdev_state)
1123                 return -ENODEV;
1124
1125         switch (cmd) {
1126         case VFIO_DEVICE_GET_INFO:
1127         {
1128                 struct vfio_device_info info;
1129
1130                 minsz = offsetofend(struct vfio_device_info, num_irqs);
1131
1132                 if (copy_from_user(&info, (void __user *)arg, minsz))
1133                         return -EFAULT;
1134
1135                 if (info.argsz < minsz)
1136                         return -EINVAL;
1137
1138                 ret = mtty_get_device_info(mdev, &info);
1139                 if (ret)
1140                         return ret;
1141
1142                 memcpy(&mdev_state->dev_info, &info, sizeof(info));
1143
1144                 if (copy_to_user((void __user *)arg, &info, minsz))
1145                         return -EFAULT;
1146
1147                 return 0;
1148         }
1149         case VFIO_DEVICE_GET_REGION_INFO:
1150         {
1151                 struct vfio_region_info info;
1152                 u16 cap_type_id = 0;
1153                 void *cap_type = NULL;
1154
1155                 minsz = offsetofend(struct vfio_region_info, offset);
1156
1157                 if (copy_from_user(&info, (void __user *)arg, minsz))
1158                         return -EFAULT;
1159
1160                 if (info.argsz < minsz)
1161                         return -EINVAL;
1162
1163                 ret = mtty_get_region_info(mdev, &info, &cap_type_id,
1164                                            &cap_type);
1165                 if (ret)
1166                         return ret;
1167
1168                 if (copy_to_user((void __user *)arg, &info, minsz))
1169                         return -EFAULT;
1170
1171                 return 0;
1172         }
1173
1174         case VFIO_DEVICE_GET_IRQ_INFO:
1175         {
1176                 struct vfio_irq_info info;
1177
1178                 minsz = offsetofend(struct vfio_irq_info, count);
1179
1180                 if (copy_from_user(&info, (void __user *)arg, minsz))
1181                         return -EFAULT;
1182
1183                 if ((info.argsz < minsz) ||
1184                     (info.index >= mdev_state->dev_info.num_irqs))
1185                         return -EINVAL;
1186
1187                 ret = mtty_get_irq_info(mdev, &info);
1188                 if (ret)
1189                         return ret;
1190
1191                 if (copy_to_user((void __user *)arg, &info, minsz))
1192                         return -EFAULT;
1193
1194                 return 0;
1195         }
1196         case VFIO_DEVICE_SET_IRQS:
1197         {
1198                 struct vfio_irq_set hdr;
1199                 u8 *data = NULL, *ptr = NULL;
1200                 size_t data_size = 0;
1201
1202                 minsz = offsetofend(struct vfio_irq_set, count);
1203
1204                 if (copy_from_user(&hdr, (void __user *)arg, minsz))
1205                         return -EFAULT;
1206
1207                 ret = vfio_set_irqs_validate_and_prepare(&hdr,
1208                                                 mdev_state->dev_info.num_irqs,
1209                                                 VFIO_PCI_NUM_IRQS,
1210                                                 &data_size);
1211                 if (ret)
1212                         return ret;
1213
1214                 if (data_size) {
1215                         ptr = data = memdup_user((void __user *)(arg + minsz),
1216                                                  data_size);
1217                         if (IS_ERR(data))
1218                                 return PTR_ERR(data);
1219                 }
1220
1221                 ret = mtty_set_irqs(mdev, hdr.flags, hdr.index, hdr.start,
1222                                     hdr.count, data);
1223
1224                 kfree(ptr);
1225                 return ret;
1226         }
1227         case VFIO_DEVICE_RESET:
1228                 return mtty_reset(mdev);
1229         }
1230         return -ENOTTY;
1231 }
1232
1233 static int mtty_open(struct mdev_device *mdev)
1234 {
1235         pr_info("%s\n", __func__);
1236         return 0;
1237 }
1238
1239 static void mtty_close(struct mdev_device *mdev)
1240 {
1241         pr_info("%s\n", __func__);
1242 }
1243
1244 static ssize_t
1245 sample_mtty_dev_show(struct device *dev, struct device_attribute *attr,
1246                      char *buf)
1247 {
1248         return sprintf(buf, "This is a phy device\n");
1249 }
1250
1251 static DEVICE_ATTR_RO(sample_mtty_dev);
1252
1253 static struct attribute *mtty_dev_attrs[] = {
1254         &dev_attr_sample_mtty_dev.attr,
1255         NULL,
1256 };
1257
1258 static const struct attribute_group mtty_dev_group = {
1259         .name  = "mtty_dev",
1260         .attrs = mtty_dev_attrs,
1261 };
1262
1263 static const struct attribute_group *mtty_dev_groups[] = {
1264         &mtty_dev_group,
1265         NULL,
1266 };
1267
1268 static ssize_t
1269 sample_mdev_dev_show(struct device *dev, struct device_attribute *attr,
1270                      char *buf)
1271 {
1272         if (mdev_from_dev(dev))
1273                 return sprintf(buf, "This is MDEV %s\n", dev_name(dev));
1274
1275         return sprintf(buf, "\n");
1276 }
1277
1278 static DEVICE_ATTR_RO(sample_mdev_dev);
1279
1280 static struct attribute *mdev_dev_attrs[] = {
1281         &dev_attr_sample_mdev_dev.attr,
1282         NULL,
1283 };
1284
1285 static const struct attribute_group mdev_dev_group = {
1286         .name  = "vendor",
1287         .attrs = mdev_dev_attrs,
1288 };
1289
1290 static const struct attribute_group *mdev_dev_groups[] = {
1291         &mdev_dev_group,
1292         NULL,
1293 };
1294
1295 static ssize_t name_show(struct mdev_type *mtype,
1296                          struct mdev_type_attribute *attr, char *buf)
1297 {
1298         static const char *name_str[2] = { "Single port serial",
1299                                            "Dual port serial" };
1300
1301         return sysfs_emit(buf, "%s\n",
1302                           name_str[mtype_get_type_group_id(mtype)]);
1303 }
1304
1305 static MDEV_TYPE_ATTR_RO(name);
1306
1307 static ssize_t available_instances_show(struct mdev_type *mtype,
1308                                         struct mdev_type_attribute *attr,
1309                                         char *buf)
1310 {
1311         struct mdev_state *mds;
1312         unsigned int ports = mtype_get_type_group_id(mtype) + 1;
1313         int used = 0;
1314
1315         list_for_each_entry(mds, &mdev_devices_list, next)
1316                 used += mds->nr_ports;
1317
1318         return sprintf(buf, "%d\n", (MAX_MTTYS - used)/ports);
1319 }
1320
1321 static MDEV_TYPE_ATTR_RO(available_instances);
1322
1323 static ssize_t device_api_show(struct mdev_type *mtype,
1324                                struct mdev_type_attribute *attr, char *buf)
1325 {
1326         return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
1327 }
1328
1329 static MDEV_TYPE_ATTR_RO(device_api);
1330
1331 static struct attribute *mdev_types_attrs[] = {
1332         &mdev_type_attr_name.attr,
1333         &mdev_type_attr_device_api.attr,
1334         &mdev_type_attr_available_instances.attr,
1335         NULL,
1336 };
1337
1338 static struct attribute_group mdev_type_group1 = {
1339         .name  = "1",
1340         .attrs = mdev_types_attrs,
1341 };
1342
1343 static struct attribute_group mdev_type_group2 = {
1344         .name  = "2",
1345         .attrs = mdev_types_attrs,
1346 };
1347
1348 static struct attribute_group *mdev_type_groups[] = {
1349         &mdev_type_group1,
1350         &mdev_type_group2,
1351         NULL,
1352 };
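/*
 * These attribute groups become the mdev_supported_types/ directories of the
 * parent device in sysfs (named after the group, e.g. "mtty-1" and "mtty-2"
 * per the mdev naming convention), each exposing the name, device_api and
 * available_instances files defined above.
 */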
1353
1354 static const struct mdev_parent_ops mdev_fops = {
1355         .owner                  = THIS_MODULE,
1356         .dev_attr_groups        = mtty_dev_groups,
1357         .mdev_attr_groups       = mdev_dev_groups,
1358         .supported_type_groups  = mdev_type_groups,
1359         .create                 = mtty_create,
1360         .remove                 = mtty_remove,
1361         .open                   = mtty_open,
1362         .release                = mtty_close,
1363         .read                   = mtty_read,
1364         .write                  = mtty_write,
1365         .ioctl                  = mtty_ioctl,
1366 };
1367
1368 static void mtty_device_release(struct device *dev)
1369 {
1370         dev_dbg(dev, "mtty: released\n");
1371 }
1372
1373 static int __init mtty_dev_init(void)
1374 {
1375         int ret = 0;
1376
1377         pr_info("mtty_dev: %s\n", __func__);
1378
1379         memset(&mtty_dev, 0, sizeof(mtty_dev));
1380
1381         idr_init(&mtty_dev.vd_idr);
1382
1383         ret = alloc_chrdev_region(&mtty_dev.vd_devt, 0, MINORMASK + 1,
1384                                   MTTY_NAME);
1385
1386         if (ret < 0) {
1387                 pr_err("Error: failed to register mtty_dev, err:%d\n", ret);
1388                 return ret;
1389         }
1390
1391         cdev_init(&mtty_dev.vd_cdev, &vd_fops);
1392         cdev_add(&mtty_dev.vd_cdev, mtty_dev.vd_devt, MINORMASK + 1);
1393
1394         pr_info("major_number:%d\n", MAJOR(mtty_dev.vd_devt));
1395
1396         mtty_dev.vd_class = class_create(THIS_MODULE, MTTY_CLASS_NAME);
1397
1398         if (IS_ERR(mtty_dev.vd_class)) {
1399                 pr_err("Error: failed to register mtty_dev class\n");
1400                 ret = PTR_ERR(mtty_dev.vd_class);
1401                 goto failed1;
1402         }
1403
1404         mtty_dev.dev.class = mtty_dev.vd_class;
1405         mtty_dev.dev.release = mtty_device_release;
1406         dev_set_name(&mtty_dev.dev, "%s", MTTY_NAME);
1407
1408         ret = device_register(&mtty_dev.dev);
1409         if (ret)
1410                 goto failed2;
1411
1412         ret = mdev_register_device(&mtty_dev.dev, &mdev_fops);
1413         if (ret)
1414                 goto failed3;
1415
1416         mutex_init(&mdev_list_lock);
1417         INIT_LIST_HEAD(&mdev_devices_list);
1418
1419         goto all_done;
1420
1421 failed3:
1422
1423         device_unregister(&mtty_dev.dev);
1424 failed2:
1425         class_destroy(mtty_dev.vd_class);
1426
1427 failed1:
1428         cdev_del(&mtty_dev.vd_cdev);
1429         unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK + 1);
1430
1431 all_done:
1432         return ret;
1433 }
1434
1435 static void __exit mtty_dev_exit(void)
1436 {
1437         mtty_dev.dev.bus = NULL;
1438         mdev_unregister_device(&mtty_dev.dev);
1439
1440         device_unregister(&mtty_dev.dev);
1441         idr_destroy(&mtty_dev.vd_idr);
1442         cdev_del(&mtty_dev.vd_cdev);
1443         unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK + 1);
1444         class_destroy(mtty_dev.vd_class);
1445         mtty_dev.vd_class = NULL;
1446         pr_info("mtty_dev: Unloaded!\n");
1447 }
1448
1449 module_init(mtty_dev_init)
1450 module_exit(mtty_dev_exit)
1451
1452 MODULE_LICENSE("GPL v2");
1453 MODULE_INFO(supported, "Test driver that simulates a serial port over PCI");
1454 MODULE_VERSION(VERSION_STRING);
1455 MODULE_AUTHOR(DRIVER_AUTHOR);