/**
 * IBM Accelerator Family 'GenWQE'
 *
 * (C) Copyright IBM Corp. 2013
 *
 * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
 * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
 * Author: Michael Jung <mijung@gmx.net>
 * Author: Michael Ruettger <michael@ibmra.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * Character device representation of the GenWQE device. This allows
 * user-space applications to communicate with the card.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/atomic.h>

#include "card_base.h"
#include "card_ddcb.h"

static int genwqe_open_files(struct genwqe_dev *cd)
{
        int rc;
        unsigned long flags;

        spin_lock_irqsave(&cd->file_lock, flags);
        rc = list_empty(&cd->file_list);
        spin_unlock_irqrestore(&cd->file_lock, flags);
        return !rc;
}

static void genwqe_add_file(struct genwqe_dev *cd, struct genwqe_file *cfile)
{
        unsigned long flags;

        cfile->opener = get_pid(task_tgid(current));
        spin_lock_irqsave(&cd->file_lock, flags);
        list_add(&cfile->list, &cd->file_list);
        spin_unlock_irqrestore(&cd->file_lock, flags);
}

static int genwqe_del_file(struct genwqe_dev *cd, struct genwqe_file *cfile)
{
        unsigned long flags;

        spin_lock_irqsave(&cd->file_lock, flags);
        list_del(&cfile->list);
        spin_unlock_irqrestore(&cd->file_lock, flags);
        put_pid(cfile->opener);

        return 0;
}

static void genwqe_add_pin(struct genwqe_file *cfile, struct dma_mapping *m)
{
        unsigned long flags;

        spin_lock_irqsave(&cfile->pin_lock, flags);
        list_add(&m->pin_list, &cfile->pin_list);
        spin_unlock_irqrestore(&cfile->pin_lock, flags);
}

static int genwqe_del_pin(struct genwqe_file *cfile, struct dma_mapping *m)
{
        unsigned long flags;

        spin_lock_irqsave(&cfile->pin_lock, flags);
        list_del(&m->pin_list);
        spin_unlock_irqrestore(&cfile->pin_lock, flags);

        return 0;
}

/**
 * genwqe_search_pin() - Search for the mapping for a userspace address
 * @cfile:      Descriptor of opened file
 * @u_addr:     User virtual address
 * @size:       Size of buffer
 * @virt_addr:  Kernel virtual address to be updated, if not NULL
 *
 * Return: Pointer to the corresponding mapping, or NULL if not found
 */
static struct dma_mapping *genwqe_search_pin(struct genwqe_file *cfile,
                                            unsigned long u_addr,
                                            unsigned int size,
                                            void **virt_addr)
{
        unsigned long flags;
        struct dma_mapping *m;

        spin_lock_irqsave(&cfile->pin_lock, flags);

        list_for_each_entry(m, &cfile->pin_list, pin_list) {
                if ((((u64)m->u_vaddr) <= (u_addr)) &&
                    (((u64)m->u_vaddr + m->size) >= (u_addr + size))) {

                        if (virt_addr)
                                *virt_addr = m->k_vaddr +
                                        (u_addr - (u64)m->u_vaddr);

                        spin_unlock_irqrestore(&cfile->pin_lock, flags);
                        return m;
                }
        }
        spin_unlock_irqrestore(&cfile->pin_lock, flags);
        return NULL;
}

static void __genwqe_add_mapping(struct genwqe_file *cfile,
                              struct dma_mapping *dma_map)
{
        unsigned long flags;

        spin_lock_irqsave(&cfile->map_lock, flags);
        list_add(&dma_map->card_list, &cfile->map_list);
        spin_unlock_irqrestore(&cfile->map_lock, flags);
}

static void __genwqe_del_mapping(struct genwqe_file *cfile,
                              struct dma_mapping *dma_map)
{
        unsigned long flags;

        spin_lock_irqsave(&cfile->map_lock, flags);
        list_del(&dma_map->card_list);
        spin_unlock_irqrestore(&cfile->map_lock, flags);
}

/**
 * __genwqe_search_mapping() - Search for the mapping for a userspace address
 * @cfile:      descriptor of opened file
 * @u_addr:     user virtual address
 * @size:       size of buffer
 * @dma_addr:   DMA address to be updated, if not NULL
 * @virt_addr:  kernel virtual address to be updated, if not NULL
 *
 * Return: Pointer to the corresponding mapping, or NULL if not found
 */
static struct dma_mapping *__genwqe_search_mapping(struct genwqe_file *cfile,
                                                   unsigned long u_addr,
                                                   unsigned int size,
                                                   dma_addr_t *dma_addr,
                                                   void **virt_addr)
{
        unsigned long flags;
        struct dma_mapping *m;
        struct pci_dev *pci_dev = cfile->cd->pci_dev;

        spin_lock_irqsave(&cfile->map_lock, flags);
        list_for_each_entry(m, &cfile->map_list, card_list) {

                if ((((u64)m->u_vaddr) <= (u_addr)) &&
                    (((u64)m->u_vaddr + m->size) >= (u_addr + size))) {

                        /*
                         * match found: current is as expected and
                         * addr is in range
                         */
                        if (dma_addr)
                                *dma_addr = m->dma_addr +
                                        (u_addr - (u64)m->u_vaddr);

                        if (virt_addr)
                                *virt_addr = m->k_vaddr +
                                        (u_addr - (u64)m->u_vaddr);

                        spin_unlock_irqrestore(&cfile->map_lock, flags);
                        return m;
                }
        }
        spin_unlock_irqrestore(&cfile->map_lock, flags);

        dev_err(&pci_dev->dev,
                "[%s] Entry not found: u_addr=%lx, size=%x\n",
                __func__, u_addr, size);

        return NULL;
}

static void genwqe_remove_mappings(struct genwqe_file *cfile)
{
        int i = 0;
        struct list_head *node, *next;
        struct dma_mapping *dma_map;
        struct genwqe_dev *cd = cfile->cd;
        struct pci_dev *pci_dev = cfile->cd->pci_dev;

        list_for_each_safe(node, next, &cfile->map_list) {
                dma_map = list_entry(node, struct dma_mapping, card_list);

                list_del_init(&dma_map->card_list);

                /*
                 * This is really a bug, because those mappings should
                 * already have been tidied up.
                 *
                 * GENWQE_MAPPING_RAW should have been removed via munmap().
                 * GENWQE_MAPPING_SGL_TEMP should be removed by tidy up code.
                 */
                dev_err(&pci_dev->dev,
                        "[%s] %d. cleanup mapping: u_vaddr=%p u_kaddr=%016lx dma_addr=%lx\n",
                        __func__, i++, dma_map->u_vaddr,
                        (unsigned long)dma_map->k_vaddr,
                        (unsigned long)dma_map->dma_addr);

                if (dma_map->type == GENWQE_MAPPING_RAW) {
                        /* we allocated this dynamically */
                        __genwqe_free_consistent(cd, dma_map->size,
                                                dma_map->k_vaddr,
                                                dma_map->dma_addr);
                        kfree(dma_map);
                } else if (dma_map->type == GENWQE_MAPPING_SGL_TEMP) {
                        /* we use dma_map statically from the request */
                        genwqe_user_vunmap(cd, dma_map);
                }
        }
}

static void genwqe_remove_pinnings(struct genwqe_file *cfile)
{
        struct list_head *node, *next;
        struct dma_mapping *dma_map;
        struct genwqe_dev *cd = cfile->cd;

        list_for_each_safe(node, next, &cfile->pin_list) {
                dma_map = list_entry(node, struct dma_mapping, pin_list);

                /*
                 * This is not a bug, because a killed process might
                 * not call the unpin ioctl, which is supposed to free
                 * the resources.
                 *
                 * Pinnings are dynamically allocated and need to be
                 * deleted.
                 */
                list_del_init(&dma_map->pin_list);
                genwqe_user_vunmap(cd, dma_map);
                kfree(dma_map);
        }
}

/**
 * genwqe_kill_fasync() - Send signal to all processes with open GenWQE files
 * @cd:         GenWQE device
 * @sig:        signal to send out
 *
 * E.g. genwqe_kill_fasync(cd, SIGIO);
 */
static int genwqe_kill_fasync(struct genwqe_dev *cd, int sig)
{
        unsigned int files = 0;
        unsigned long flags;
        struct genwqe_file *cfile;

        spin_lock_irqsave(&cd->file_lock, flags);
        list_for_each_entry(cfile, &cd->file_list, list) {
                if (cfile->async_queue)
                        kill_fasync(&cfile->async_queue, sig, POLL_HUP);
                files++;
        }
        spin_unlock_irqrestore(&cd->file_lock, flags);
        return files;
}

static int genwqe_terminate(struct genwqe_dev *cd)
{
        unsigned int files = 0;
        unsigned long flags;
        struct genwqe_file *cfile;

        spin_lock_irqsave(&cd->file_lock, flags);
        list_for_each_entry(cfile, &cd->file_list, list) {
                kill_pid(cfile->opener, SIGKILL, 1);
                files++;
        }
        spin_unlock_irqrestore(&cd->file_lock, flags);
        return files;
}

/**
 * genwqe_open() - file open
 * @inode:      file system information
 * @filp:       file handle
 *
 * This function is executed whenever an application calls
 * open("/dev/genwqe",..).
 *
 * Return: 0 if successful or <0 if errors
 */
static int genwqe_open(struct inode *inode, struct file *filp)
{
        struct genwqe_dev *cd;
        struct genwqe_file *cfile;

        cfile = kzalloc(sizeof(*cfile), GFP_KERNEL);
        if (cfile == NULL)
                return -ENOMEM;

        cd = container_of(inode->i_cdev, struct genwqe_dev, cdev_genwqe);
        cfile->cd = cd;
        cfile->filp = filp;
        cfile->client = NULL;

        spin_lock_init(&cfile->map_lock);  /* list of raw memory allocations */
        INIT_LIST_HEAD(&cfile->map_list);

        spin_lock_init(&cfile->pin_lock);  /* list of user pinned memory */
        INIT_LIST_HEAD(&cfile->pin_list);

        filp->private_data = cfile;

        genwqe_add_file(cd, cfile);
        return 0;
}
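
/*
 * A minimal user-space sketch (not part of the driver) of how an
 * application reaches genwqe_open(); the node name is assumed to
 * follow the GENWQE_DEVNAME "%u_card" pattern used in
 * genwqe_device_create() below:
 *
 *   #include <fcntl.h>
 *   #include <unistd.h>
 *
 *   int fd = open("/dev/genwqe0_card", O_RDWR);
 *   if (fd < 0)
 *           perror("open");
 *   ...
 *   close(fd);    // ends up in genwqe_release()
 */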

/**
 * genwqe_fasync() - Setup process to receive SIGIO.
 * @fd:        file descriptor
 * @filp:      file handle
 * @mode:      file mode
 *
 * Sending a signal works as follows:
 *
 * if (cdev->async_queue)
 *         kill_fasync(&cdev->async_queue, SIGIO, POLL_IN);
 *
 * Some devices also implement asynchronous notification to indicate
 * when the device can be written; in this case, of course,
 * kill_fasync must be called with a mode of POLL_OUT.
 */
static int genwqe_fasync(int fd, struct file *filp, int mode)
{
        struct genwqe_file *cdev = (struct genwqe_file *)filp->private_data;

        return fasync_helper(fd, filp, mode, &cdev->async_queue);
}
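
/*
 * A hedged user-space sketch (not part of the driver) of how a
 * process would register for the SIGIO notifications sent by
 * genwqe_kill_fasync(); this is the standard fcntl() sequence for
 * any fasync-capable device, and my_sigio_handler is hypothetical:
 *
 *   #include <fcntl.h>
 *   #include <signal.h>
 *   #include <unistd.h>
 *
 *   signal(SIGIO, my_sigio_handler);
 *   fcntl(fd, F_SETOWN, getpid());
 *   fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | FASYNC);
 */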

/**
 * genwqe_release() - file close
 * @inode:      file system information
 * @filp:       file handle
 *
 * This function is executed whenever an application calls 'close(fd_genwqe)'
 *
 * Return: always 0
 */
static int genwqe_release(struct inode *inode, struct file *filp)
{
        struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data;
        struct genwqe_dev *cd = cfile->cd;

        /* there must be no entries in these lists! */
        genwqe_remove_mappings(cfile);
        genwqe_remove_pinnings(cfile);

        /* remove this filp from the asynchronously notified filp's */
        genwqe_fasync(-1, filp, 0);

        /*
         * For this to work we must not release cd before this cfile
         * is released; otherwise the list entry would be invalid,
         * because the list itself gets reinstantiated!
         */
        genwqe_del_file(cd, cfile);
        kfree(cfile);
        return 0;
}

static void genwqe_vma_open(struct vm_area_struct *vma)
{
        /* nothing ... */
}

/**
 * genwqe_vma_close() - Called each time the vma is unmapped
 * @vma:        VMA area to close
 *
 * Free memory which got allocated by GenWQE mmap().
 */
static void genwqe_vma_close(struct vm_area_struct *vma)
{
        unsigned long vsize = vma->vm_end - vma->vm_start;
        struct inode *inode = file_inode(vma->vm_file);
        struct dma_mapping *dma_map;
        struct genwqe_dev *cd = container_of(inode->i_cdev, struct genwqe_dev,
                                            cdev_genwqe);
        struct pci_dev *pci_dev = cd->pci_dev;
        dma_addr_t d_addr = 0;
        struct genwqe_file *cfile = vma->vm_private_data;

        dma_map = __genwqe_search_mapping(cfile, vma->vm_start, vsize,
                                         &d_addr, NULL);
        if (dma_map == NULL) {
                dev_err(&pci_dev->dev,
                        "  [%s] err: mapping not found: v=%lx, p=%lx s=%lx\n",
                        __func__, vma->vm_start, vma->vm_pgoff << PAGE_SHIFT,
                        vsize);
                return;
        }
        __genwqe_del_mapping(cfile, dma_map);
        __genwqe_free_consistent(cd, dma_map->size, dma_map->k_vaddr,
                                 dma_map->dma_addr);
        kfree(dma_map);
}

static const struct vm_operations_struct genwqe_vma_ops = {
        .open   = genwqe_vma_open,
        .close  = genwqe_vma_close,
};

/**
 * genwqe_mmap() - Provide contiguous buffers to userspace
 * @filp:       file pointer
 * @vma:        VMA area to map
 *
 * We use mmap() to allocate contiguous buffers used for DMA
 * transfers. After the buffer is allocated we remap it to user-space
 * and remember a reference to our dma_mapping data structure, where
 * we store the associated DMA address and allocated size.
 *
 * When we receive a DDCB execution request with the ATS bits set to
 * plain buffer, we lookup our dma_mapping list to find the
 * corresponding DMA address for the associated user-space address.
 */
static int genwqe_mmap(struct file *filp, struct vm_area_struct *vma)
{
        int rc;
        unsigned long pfn, vsize = vma->vm_end - vma->vm_start;
        struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data;
        struct genwqe_dev *cd = cfile->cd;
        struct dma_mapping *dma_map;

        if (vsize == 0)
                return -EINVAL;

        if (get_order(vsize) > MAX_ORDER)
                return -ENOMEM;

        dma_map = kzalloc(sizeof(struct dma_mapping), GFP_KERNEL);
        if (dma_map == NULL)
                return -ENOMEM;

        genwqe_mapping_init(dma_map, GENWQE_MAPPING_RAW);
        dma_map->u_vaddr = (void *)vma->vm_start;
        dma_map->size = vsize;
        dma_map->nr_pages = DIV_ROUND_UP(vsize, PAGE_SIZE);
        dma_map->k_vaddr = __genwqe_alloc_consistent(cd, vsize,
                                                     &dma_map->dma_addr);
        if (dma_map->k_vaddr == NULL) {
                rc = -ENOMEM;
                goto free_dma_map;
        }

        if (capable(CAP_SYS_ADMIN) && (vsize > sizeof(dma_addr_t)))
                *(dma_addr_t *)dma_map->k_vaddr = dma_map->dma_addr;

        pfn = virt_to_phys(dma_map->k_vaddr) >> PAGE_SHIFT;
        rc = remap_pfn_range(vma,
                             vma->vm_start,
                             pfn,
                             vsize,
                             vma->vm_page_prot);
        if (rc != 0) {
                rc = -EFAULT;
                goto free_dma_mem;
        }

        vma->vm_private_data = cfile;
        vma->vm_ops = &genwqe_vma_ops;
        __genwqe_add_mapping(cfile, dma_map);

        return 0;

 free_dma_mem:
        __genwqe_free_consistent(cd, dma_map->size,
                                dma_map->k_vaddr,
                                dma_map->dma_addr);
 free_dma_map:
        kfree(dma_map);
        return rc;
}
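
/*
 * A minimal user-space sketch (not part of the driver): the length
 * passed to mmap() becomes vsize in genwqe_mmap() above and must be
 * non-zero; the buffer size of 2 MiB is purely illustrative:
 *
 *   #include <sys/mman.h>
 *
 *   size_t len = 2 * 1024 * 1024;
 *   void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *                    MAP_SHARED, fd, 0);
 *   if (buf == MAP_FAILED)
 *           perror("mmap");
 *   ...
 *   munmap(buf, len);    // ends up in genwqe_vma_close()
 */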

/**
 * do_flash_update() - Execute flash update (write image or VPD)
 * @cfile:     descriptor of opened file
 * @load:      details about image load
 *
 * Return: 0 if successful
 */

#define FLASH_BLOCK     0x40000 /* we use 256k blocks */

static int do_flash_update(struct genwqe_file *cfile,
                           struct genwqe_bitstream *load)
{
        int rc = 0;
        int blocks_to_flash;
        dma_addr_t dma_addr;
        u64 flash = 0;
        size_t tocopy = 0;
        u8 __user *buf;
        u8 *xbuf;
        u32 crc;
        u8 cmdopts;
        struct genwqe_dev *cd = cfile->cd;
        struct file *filp = cfile->filp;
        struct pci_dev *pci_dev = cd->pci_dev;

        if ((load->size & 0x3) != 0)
                return -EINVAL;

        if (((unsigned long)(load->data_addr) & ~PAGE_MASK) != 0)
                return -EINVAL;

        /* FIXME Bits have changed for new service layer! */
        switch ((char)load->partition) {
        case '0':
                cmdopts = 0x14;
                break;          /* download/erase_first/part_0 */
        case '1':
                cmdopts = 0x1C;
                break;          /* download/erase_first/part_1 */
        case 'v':
                cmdopts = 0x0C;
                break;          /* download/erase_first/vpd */
        default:
                return -EINVAL;
        }

        buf = (u8 __user *)load->data_addr;
        xbuf = __genwqe_alloc_consistent(cd, FLASH_BLOCK, &dma_addr);
        if (xbuf == NULL)
                return -ENOMEM;

        blocks_to_flash = load->size / FLASH_BLOCK;
        while (load->size) {
                struct genwqe_ddcb_cmd *req;

                /*
                 * We must be 4 byte aligned. Buffer must be 0 appended
                 * to have defined values when calculating CRC.
                 */
                tocopy = min_t(size_t, load->size, FLASH_BLOCK);

                rc = copy_from_user(xbuf, buf, tocopy);
                if (rc) {
                        rc = -EFAULT;
                        goto free_buffer;
                }
                crc = genwqe_crc32(xbuf, tocopy, 0xffffffff);

                dev_dbg(&pci_dev->dev,
                        "[%s] DMA: %lx CRC: %08x SZ: %ld %d\n",
                        __func__, (unsigned long)dma_addr, crc, tocopy,
                        blocks_to_flash);

                /* prepare DDCB for SLU process */
                req = ddcb_requ_alloc();
                if (req == NULL) {
                        rc = -ENOMEM;
                        goto free_buffer;
                }

                req->cmd = SLCMD_MOVE_FLASH;
                req->cmdopts = cmdopts;

                /* prepare invariant values */
                if (genwqe_get_slu_id(cd) <= 0x2) {
                        *(__be64 *)&req->__asiv[0]  = cpu_to_be64(dma_addr);
                        *(__be64 *)&req->__asiv[8]  = cpu_to_be64(tocopy);
                        *(__be64 *)&req->__asiv[16] = cpu_to_be64(flash);
                        *(__be32 *)&req->__asiv[24] = cpu_to_be32(0);
                        req->__asiv[24]        = load->uid;
                        *(__be32 *)&req->__asiv[28] = cpu_to_be32(crc);

                        /* for simulation only */
                        *(__be64 *)&req->__asiv[88] = cpu_to_be64(load->slu_id);
                        *(__be64 *)&req->__asiv[96] = cpu_to_be64(load->app_id);
                        req->asiv_length = 32; /* bytes included in crc calc */
                } else {        /* setup DDCB for ATS architecture */
                        *(__be64 *)&req->asiv[0]  = cpu_to_be64(dma_addr);
                        *(__be32 *)&req->asiv[8]  = cpu_to_be32(tocopy);
                        *(__be32 *)&req->asiv[12] = cpu_to_be32(0); /* resvd */
                        *(__be64 *)&req->asiv[16] = cpu_to_be64(flash);
                        *(__be32 *)&req->asiv[24] = cpu_to_be32(load->uid<<24);
                        *(__be32 *)&req->asiv[28] = cpu_to_be32(crc);

                        /* for simulation only */
                        *(__be64 *)&req->asiv[80] = cpu_to_be64(load->slu_id);
                        *(__be64 *)&req->asiv[88] = cpu_to_be64(load->app_id);

                        /* Rd only */
                        req->ats = 0x4ULL << 44;
                        req->asiv_length = 40; /* bytes included in crc calc */
                }
                req->asv_length  = 8;

                /* For Genwqe5 we get back the calculated CRC */
                *(u64 *)&req->asv[0] = 0ULL;                    /* 0x80 */

                rc = __genwqe_execute_raw_ddcb(cd, req, filp->f_flags);

                load->retc = req->retc;
                load->attn = req->attn;
                load->progress = req->progress;

                if (rc < 0) {
                        ddcb_requ_free(req);
                        goto free_buffer;
                }

                if (req->retc != DDCB_RETC_COMPLETE) {
                        rc = -EIO;
                        ddcb_requ_free(req);
                        goto free_buffer;
                }

                load->size  -= tocopy;
                flash += tocopy;
                buf += tocopy;
                blocks_to_flash--;
                ddcb_requ_free(req);
        }

 free_buffer:
        __genwqe_free_consistent(cd, FLASH_BLOCK, xbuf, dma_addr);
        return rc;
}
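
/*
 * A hedged user-space sketch (not part of the driver) of driving
 * do_flash_update() via the GENWQE_SLU_UPDATE ioctl; image_buf and
 * image_size are hypothetical, and the constraints mirror the checks
 * above (size a multiple of 4, data_addr page aligned, partition one
 * of '0', '1' or 'v'):
 *
 *   struct genwqe_bitstream load = { 0 };
 *
 *   load.data_addr = (unsigned long)image_buf;
 *   load.size      = image_size;
 *   load.partition = '0';
 *
 *   if (ioctl(fd, GENWQE_SLU_UPDATE, &load) < 0)
 *           perror("GENWQE_SLU_UPDATE");
 *   // load.retc/attn/progress are copied back for inspection
 */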

static int do_flash_read(struct genwqe_file *cfile,
                         struct genwqe_bitstream *load)
{
        int rc, blocks_to_flash;
        dma_addr_t dma_addr;
        u64 flash = 0;
        size_t tocopy = 0;
        u8 __user *buf;
        u8 *xbuf;
        u8 cmdopts;
        struct genwqe_dev *cd = cfile->cd;
        struct file *filp = cfile->filp;
        struct pci_dev *pci_dev = cd->pci_dev;
        struct genwqe_ddcb_cmd *cmd;

        if ((load->size & 0x3) != 0)
                return -EINVAL;

        if (((unsigned long)(load->data_addr) & ~PAGE_MASK) != 0)
                return -EINVAL;

        /* FIXME Bits have changed for new service layer! */
        switch ((char)load->partition) {
        case '0':
                cmdopts = 0x12;
                break;          /* upload/part_0 */
        case '1':
                cmdopts = 0x1A;
                break;          /* upload/part_1 */
        case 'v':
                cmdopts = 0x0A;
                break;          /* upload/vpd */
        default:
                return -EINVAL;
        }

        buf = (u8 __user *)load->data_addr;
        xbuf = __genwqe_alloc_consistent(cd, FLASH_BLOCK, &dma_addr);
        if (xbuf == NULL)
                return -ENOMEM;

        blocks_to_flash = load->size / FLASH_BLOCK;
        while (load->size) {
                /*
                 * We must be 4 byte aligned. Buffer must be 0 appended
                 * to have defined values when calculating CRC.
                 */
                tocopy = min_t(size_t, load->size, FLASH_BLOCK);

                dev_dbg(&pci_dev->dev,
                        "[%s] DMA: %lx SZ: %ld %d\n",
                        __func__, (unsigned long)dma_addr, tocopy,
                        blocks_to_flash);

                /* prepare DDCB for SLU process */
                cmd = ddcb_requ_alloc();
                if (cmd == NULL) {
                        rc = -ENOMEM;
                        goto free_buffer;
                }
                cmd->cmd = SLCMD_MOVE_FLASH;
                cmd->cmdopts = cmdopts;

                /* prepare invariant values */
                if (genwqe_get_slu_id(cd) <= 0x2) {
                        *(__be64 *)&cmd->__asiv[0]  = cpu_to_be64(dma_addr);
                        *(__be64 *)&cmd->__asiv[8]  = cpu_to_be64(tocopy);
                        *(__be64 *)&cmd->__asiv[16] = cpu_to_be64(flash);
                        *(__be32 *)&cmd->__asiv[24] = cpu_to_be32(0);
                        cmd->__asiv[24] = load->uid;
                        *(__be32 *)&cmd->__asiv[28] = cpu_to_be32(0) /* CRC */;
                        cmd->asiv_length = 32; /* bytes included in crc calc */
                } else {        /* setup DDCB for ATS architecture */
                        *(__be64 *)&cmd->asiv[0]  = cpu_to_be64(dma_addr);
                        *(__be32 *)&cmd->asiv[8]  = cpu_to_be32(tocopy);
                        *(__be32 *)&cmd->asiv[12] = cpu_to_be32(0); /* resvd */
                        *(__be64 *)&cmd->asiv[16] = cpu_to_be64(flash);
                        *(__be32 *)&cmd->asiv[24] = cpu_to_be32(load->uid<<24);
                        *(__be32 *)&cmd->asiv[28] = cpu_to_be32(0); /* CRC */

                        /* rd/wr */
                        cmd->ats = 0x5ULL << 44;
                        cmd->asiv_length = 40; /* bytes included in crc calc */
                }
                cmd->asv_length  = 8;

                /* we only get back the calculated CRC */
                *(u64 *)&cmd->asv[0] = 0ULL;    /* 0x80 */

                rc = __genwqe_execute_raw_ddcb(cd, cmd, filp->f_flags);

                load->retc = cmd->retc;
                load->attn = cmd->attn;
                load->progress = cmd->progress;

                if ((rc < 0) && (rc != -EBADMSG)) {
                        ddcb_requ_free(cmd);
                        goto free_buffer;
                }

                rc = copy_to_user(buf, xbuf, tocopy);
                if (rc) {
                        rc = -EFAULT;
                        ddcb_requ_free(cmd);
                        goto free_buffer;
                }

                /* We know that we can get retc 0x104 with CRC err */
                if (((cmd->retc == DDCB_RETC_FAULT) &&
                     (cmd->attn != 0x02)) ||  /* Normally ignore CRC error */
                    ((cmd->retc == DDCB_RETC_COMPLETE) &&
                     (cmd->attn != 0x00))) {  /* Everything was fine */
                        rc = -EIO;
                        ddcb_requ_free(cmd);
                        goto free_buffer;
                }

                load->size  -= tocopy;
                flash += tocopy;
                buf += tocopy;
                blocks_to_flash--;
                ddcb_requ_free(cmd);
        }
        rc = 0;

 free_buffer:
        __genwqe_free_consistent(cd, FLASH_BLOCK, xbuf, dma_addr);
        return rc;
}

static int genwqe_pin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
{
        int rc;
        struct genwqe_dev *cd = cfile->cd;
        struct pci_dev *pci_dev = cfile->cd->pci_dev;
        struct dma_mapping *dma_map;
        unsigned long map_addr;
        unsigned long map_size;

        if ((m->addr == 0x0) || (m->size == 0))
                return -EINVAL;

        map_addr = (m->addr & PAGE_MASK);
        map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE);

        dma_map = kzalloc(sizeof(struct dma_mapping), GFP_KERNEL);
        if (dma_map == NULL)
                return -ENOMEM;

        genwqe_mapping_init(dma_map, GENWQE_MAPPING_SGL_PINNED);
        rc = genwqe_user_vmap(cd, dma_map, (void *)map_addr, map_size);
        if (rc != 0) {
                dev_err(&pci_dev->dev,
                        "[%s] genwqe_user_vmap rc=%d\n", __func__, rc);
                kfree(dma_map);
                return rc;
        }

        genwqe_add_pin(cfile, dma_map);
        return 0;
}

static int genwqe_unpin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
{
        struct genwqe_dev *cd = cfile->cd;
        struct dma_mapping *dma_map;
        unsigned long map_addr;
        unsigned long map_size;

        if (m->addr == 0x0)
                return -EINVAL;

        map_addr = (m->addr & PAGE_MASK);
        map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE);

        dma_map = genwqe_search_pin(cfile, map_addr, map_size, NULL);
        if (dma_map == NULL)
                return -ENOENT;

        genwqe_del_pin(cfile, dma_map);
        genwqe_user_vunmap(cd, dma_map);
        kfree(dma_map);
        return 0;
}
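
/*
 * A hedged user-space sketch (not part of the driver) of the pin and
 * unpin ioctls handled above; struct genwqe_mem is assumed to come
 * from the GenWQE uapi header, and only the addr/size fields used in
 * this file are filled in (data and data_size are hypothetical):
 *
 *   struct genwqe_mem m = { 0 };
 *
 *   m.addr = (unsigned long)data;
 *   m.size = data_size;
 *
 *   if (ioctl(fd, GENWQE_PIN_MEM, &m) < 0)
 *           perror("GENWQE_PIN_MEM");
 *   ...
 *   if (ioctl(fd, GENWQE_UNPIN_MEM, &m) < 0)
 *           perror("GENWQE_UNPIN_MEM");
 */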

/**
 * ddcb_cmd_cleanup() - Remove dynamically created fixup entries
 * @cfile:      descriptor of opened file
 * @req:        DDCB work request
 *
 * Only if there are any. Pinnings are not removed.
 */
static int ddcb_cmd_cleanup(struct genwqe_file *cfile, struct ddcb_requ *req)
{
        unsigned int i;
        struct dma_mapping *dma_map;
        struct genwqe_dev *cd = cfile->cd;

        for (i = 0; i < DDCB_FIXUPS; i++) {
                dma_map = &req->dma_mappings[i];

                if (dma_mapping_used(dma_map)) {
                        __genwqe_del_mapping(cfile, dma_map);
                        genwqe_user_vunmap(cd, dma_map);
                }
                if (req->sgls[i].sgl != NULL)
                        genwqe_free_sync_sgl(cd, &req->sgls[i]);
        }
        return 0;
}

/**
 * ddcb_cmd_fixups() - Establish DMA fixups/sglists for user memory references
 * @cfile:      descriptor of opened file
 * @req:        DDCB work request
 *
 * Before the DDCB gets executed we need to handle the fixups. We
 * replace the user-space addresses with DMA addresses or do
 * additional setup work e.g. generating a scatter-gather list which
 * is used to describe the memory referred to in the fixup.
 */
static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req)
{
        int rc;
        unsigned int asiv_offs, i;
        struct genwqe_dev *cd = cfile->cd;
        struct genwqe_ddcb_cmd *cmd = &req->cmd;
        struct dma_mapping *m;

        for (i = 0, asiv_offs = 0x00; asiv_offs <= 0x58;
             i++, asiv_offs += 0x08) {

                u64 u_addr;
                dma_addr_t d_addr;
                u32 u_size = 0;
                u64 ats_flags;

                ats_flags = ATS_GET_FLAGS(cmd->ats, asiv_offs);

                switch (ats_flags) {

                case ATS_TYPE_DATA:
                        break;  /* nothing to do here */

                case ATS_TYPE_FLAT_RDWR:
                case ATS_TYPE_FLAT_RD: {
                        u_addr = be64_to_cpu(*((__be64 *)&cmd->
                                               asiv[asiv_offs]));
                        u_size = be32_to_cpu(*((__be32 *)&cmd->
                                               asiv[asiv_offs + 0x08]));

                        /*
                         * No data available. Ignore u_addr in this
                         * case and set addr to 0. Hardware must not
                         * fetch the buffer.
                         */
                        if (u_size == 0x0) {
                                *((__be64 *)&cmd->asiv[asiv_offs]) =
                                        cpu_to_be64(0x0);
                                break;
                        }

                        m = __genwqe_search_mapping(cfile, u_addr, u_size,
                                                   &d_addr, NULL);
                        if (m == NULL) {
                                rc = -EFAULT;
                                goto err_out;
                        }

                        *((__be64 *)&cmd->asiv[asiv_offs]) =
                                cpu_to_be64(d_addr);
                        break;
                }

                case ATS_TYPE_SGL_RDWR:
                case ATS_TYPE_SGL_RD: {
                        int page_offs;

                        u_addr = be64_to_cpu(*((__be64 *)
                                               &cmd->asiv[asiv_offs]));
                        u_size = be32_to_cpu(*((__be32 *)
                                               &cmd->asiv[asiv_offs + 0x08]));

                        /*
                         * No data available. Ignore u_addr in this
                         * case and set addr to 0. Hardware must not
                         * fetch the empty sgl.
                         */
                        if (u_size == 0x0) {
                                *((__be64 *)&cmd->asiv[asiv_offs]) =
                                        cpu_to_be64(0x0);
                                break;
                        }

                        m = genwqe_search_pin(cfile, u_addr, u_size, NULL);
                        if (m != NULL) {
                                page_offs = (u_addr -
                                             (u64)m->u_vaddr)/PAGE_SIZE;
                        } else {
                                m = &req->dma_mappings[i];

                                genwqe_mapping_init(m,
                                                    GENWQE_MAPPING_SGL_TEMP);

                                if (ats_flags == ATS_TYPE_SGL_RD)
                                        m->write = 0;

                                rc = genwqe_user_vmap(cd, m, (void *)u_addr,
                                                      u_size);
                                if (rc != 0)
                                        goto err_out;

                                __genwqe_add_mapping(cfile, m);
                                page_offs = 0;
                        }

                        /* create genwqe style scatter gather list */
                        rc = genwqe_alloc_sync_sgl(cd, &req->sgls[i],
                                                   (void __user *)u_addr,
                                                   u_size, m->write);
                        if (rc != 0)
                                goto err_out;

                        genwqe_setup_sgl(cd, &req->sgls[i],
                                         &m->dma_list[page_offs]);

                        *((__be64 *)&cmd->asiv[asiv_offs]) =
                                cpu_to_be64(req->sgls[i].sgl_dma_addr);

                        break;
                }
                default:
                        rc = -EINVAL;
                        goto err_out;
                }
        }
        return 0;

 err_out:
        ddcb_cmd_cleanup(cfile, req);
        return rc;
}

/**
 * genwqe_execute_ddcb() - Execute DDCB using userspace address fixups
 * @cfile:      descriptor of opened file
 * @cmd:        DDCB command to execute
 *
 * The code will build up the translation tables or lookup the
 * contiguous memory allocation table to find the right translations
 * and DMA addresses.
 */
static int genwqe_execute_ddcb(struct genwqe_file *cfile,
                               struct genwqe_ddcb_cmd *cmd)
{
        int rc;
        struct genwqe_dev *cd = cfile->cd;
        struct file *filp = cfile->filp;
        struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd);

        rc = ddcb_cmd_fixups(cfile, req);
        if (rc != 0)
                return rc;

        rc = __genwqe_execute_raw_ddcb(cd, cmd, filp->f_flags);
        ddcb_cmd_cleanup(cfile, req);
        return rc;
}

static int do_execute_ddcb(struct genwqe_file *cfile,
                           unsigned long arg, int raw)
{
        int rc;
        struct genwqe_ddcb_cmd *cmd;
        struct genwqe_dev *cd = cfile->cd;
        struct file *filp = cfile->filp;

        cmd = ddcb_requ_alloc();
        if (cmd == NULL)
                return -ENOMEM;

        if (copy_from_user(cmd, (void __user *)arg, sizeof(*cmd))) {
                ddcb_requ_free(cmd);
                return -EFAULT;
        }

        if (!raw)
                rc = genwqe_execute_ddcb(cfile, cmd);
        else
                rc = __genwqe_execute_raw_ddcb(cd, cmd, filp->f_flags);

        /*
         * Copy back only the modified fields. Do not copy ASIV
         * back since the copy got modified by the driver.
         */
        if (copy_to_user((void __user *)arg, cmd,
                         sizeof(*cmd) - DDCB_ASIV_LENGTH)) {
                ddcb_requ_free(cmd);
                return -EFAULT;
        }

        ddcb_requ_free(cmd);
        return rc;
}
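
/*
 * A hedged user-space sketch (not part of the driver) of submitting
 * a DDCB through do_execute_ddcb() above; the cmd/cmdopts values and
 * the ASIV payload are application specific and purely illustrative
 * here:
 *
 *   struct genwqe_ddcb_cmd cmd = { 0 };
 *
 *   cmd.cmd         = 0x00;    // hypothetical application command
 *   cmd.asiv_length = 32;
 *   cmd.asv_length  = 8;
 *
 *   if (ioctl(fd, GENWQE_EXECUTE_DDCB, &cmd) < 0)
 *           perror("GENWQE_EXECUTE_DDCB");
 *   // cmd.retc/attn/progress are copied back for inspection
 */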

/**
 * genwqe_ioctl() - IO control
 * @filp:       file handle
 * @cmd:        command identifier (passed from user)
 * @arg:        argument (passed from user)
 *
 * Return: 0 on success
 */
static long genwqe_ioctl(struct file *filp, unsigned int cmd,
                         unsigned long arg)
{
        int rc = 0;
        struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data;
        struct genwqe_dev *cd = cfile->cd;
        struct pci_dev *pci_dev = cd->pci_dev;
        struct genwqe_reg_io __user *io;
        u64 val;
        u32 reg_offs;

        /* Return -EIO if card hit EEH */
        if (pci_channel_offline(pci_dev))
                return -EIO;

        if (_IOC_TYPE(cmd) != GENWQE_IOC_CODE)
                return -EINVAL;

        switch (cmd) {

        case GENWQE_GET_CARD_STATE:
                put_user(cd->card_state, (enum genwqe_card_state __user *)arg);
                return 0;

                /* Register access */
        case GENWQE_READ_REG64: {
                io = (struct genwqe_reg_io __user *)arg;

                if (get_user(reg_offs, &io->num))
                        return -EFAULT;

                if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x7))
                        return -EINVAL;

                val = __genwqe_readq(cd, reg_offs);
                put_user(val, &io->val64);
                return 0;
        }

        case GENWQE_WRITE_REG64: {
                io = (struct genwqe_reg_io __user *)arg;

                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;

                if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
                        return -EPERM;

                if (get_user(reg_offs, &io->num))
                        return -EFAULT;

                if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x7))
                        return -EINVAL;

                if (get_user(val, &io->val64))
                        return -EFAULT;

                __genwqe_writeq(cd, reg_offs, val);
                return 0;
        }

        case GENWQE_READ_REG32: {
                io = (struct genwqe_reg_io __user *)arg;

                if (get_user(reg_offs, &io->num))
                        return -EFAULT;

                if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x3))
                        return -EINVAL;

                val = __genwqe_readl(cd, reg_offs);
                put_user(val, &io->val64);
                return 0;
        }

        case GENWQE_WRITE_REG32: {
                io = (struct genwqe_reg_io __user *)arg;

                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;

                if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
                        return -EPERM;

                if (get_user(reg_offs, &io->num))
                        return -EFAULT;

                if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x3))
                        return -EINVAL;

                if (get_user(val, &io->val64))
                        return -EFAULT;

                __genwqe_writel(cd, reg_offs, val);
                return 0;
        }

                /* Flash update/reading */
        case GENWQE_SLU_UPDATE: {
                struct genwqe_bitstream load;

                if (!genwqe_is_privileged(cd))
                        return -EPERM;

                if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
                        return -EPERM;

                if (copy_from_user(&load, (void __user *)arg,
                                   sizeof(load)))
                        return -EFAULT;

                rc = do_flash_update(cfile, &load);

                if (copy_to_user((void __user *)arg, &load, sizeof(load)))
                        return -EFAULT;

                return rc;
        }

        case GENWQE_SLU_READ: {
                struct genwqe_bitstream load;

                if (!genwqe_is_privileged(cd))
                        return -EPERM;

                if (genwqe_flash_readback_fails(cd))
                        return -ENOSPC;  /* known to fail for old versions */

                if (copy_from_user(&load, (void __user *)arg, sizeof(load)))
                        return -EFAULT;

                rc = do_flash_read(cfile, &load);

                if (copy_to_user((void __user *)arg, &load, sizeof(load)))
                        return -EFAULT;

                return rc;
        }

                /* memory pinning and unpinning */
        case GENWQE_PIN_MEM: {
                struct genwqe_mem m;

                if (copy_from_user(&m, (void __user *)arg, sizeof(m)))
                        return -EFAULT;

                return genwqe_pin_mem(cfile, &m);
        }

        case GENWQE_UNPIN_MEM: {
                struct genwqe_mem m;

                if (copy_from_user(&m, (void __user *)arg, sizeof(m)))
                        return -EFAULT;

                return genwqe_unpin_mem(cfile, &m);
        }

                /* launch a DDCB and wait for completion */
        case GENWQE_EXECUTE_DDCB:
                return do_execute_ddcb(cfile, arg, 0);

        case GENWQE_EXECUTE_RAW_DDCB: {

                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;

                return do_execute_ddcb(cfile, arg, 1);
        }

        default:
                return -EINVAL;
        }

        return rc;
}
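
/*
 * A hedged user-space sketch (not part of the driver) for the
 * register access path in genwqe_ioctl() above; struct genwqe_reg_io
 * is assumed to come from the GenWQE uapi header, and the offset
 * 0x08 is purely illustrative (it must be 8-byte aligned and below
 * mmio_len):
 *
 *   struct genwqe_reg_io io = { 0 };
 *
 *   io.num = 0x08;
 *   if (ioctl(fd, GENWQE_READ_REG64, &io) < 0)
 *           perror("GENWQE_READ_REG64");
 *   else
 *           printf("reg[0x08] = 0x%016llx\n",
 *                  (unsigned long long)io.val64);
 */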

#if defined(CONFIG_COMPAT)
/**
 * genwqe_compat_ioctl() - Compatibility ioctl
 * @filp:        file pointer.
 * @cmd:         command.
 * @arg:         user argument.
 *
 * Called whenever a 32-bit process running under a 64-bit kernel
 * performs an ioctl on /dev/genwqe<n>_card.
 *
 * Return:       zero on success or negative number on failure.
 */
static long genwqe_compat_ioctl(struct file *filp, unsigned int cmd,
                                unsigned long arg)
{
        return genwqe_ioctl(filp, cmd, arg);
}
#endif /* defined(CONFIG_COMPAT) */

static const struct file_operations genwqe_fops = {
        .owner          = THIS_MODULE,
        .open           = genwqe_open,
        .fasync         = genwqe_fasync,
        .mmap           = genwqe_mmap,
        .unlocked_ioctl = genwqe_ioctl,
#if defined(CONFIG_COMPAT)
        .compat_ioctl   = genwqe_compat_ioctl,
#endif
        .release        = genwqe_release,
};

static int genwqe_device_initialized(struct genwqe_dev *cd)
{
        return cd->dev != NULL;
}

/**
 * genwqe_device_create() - Create and configure genwqe char device
 * @cd:      genwqe device descriptor
 *
 * This function must be called before we create any more genwqe
 * character devices, because it is allocating the major and minor
 * numbers, which are supposed to be used by the client drivers.
 */
int genwqe_device_create(struct genwqe_dev *cd)
{
        int rc;
        struct pci_dev *pci_dev = cd->pci_dev;

        /*
         * Here starts the individual setup per client. It must
         * initialize its own cdev data structure with its own fops.
         * The appropriate devnum needs to be created. The ranges must
         * not overlap.
         */
        rc = alloc_chrdev_region(&cd->devnum_genwqe, 0,
                                 GENWQE_MAX_MINOR, GENWQE_DEVNAME);
        if (rc < 0) {
                dev_err(&pci_dev->dev, "err: alloc_chrdev_region failed\n");
                goto err_dev;
        }

        cdev_init(&cd->cdev_genwqe, &genwqe_fops);
        cd->cdev_genwqe.owner = THIS_MODULE;

        rc = cdev_add(&cd->cdev_genwqe, cd->devnum_genwqe, 1);
        if (rc < 0) {
                dev_err(&pci_dev->dev, "err: cdev_add failed\n");
                goto err_add;
        }

        /*
         * Finally the device in /dev/... must be created. The rule is
         * to use card%d_clientname for each created device.
         */
        cd->dev = device_create_with_groups(cd->class_genwqe,
                                            &cd->pci_dev->dev,
                                            cd->devnum_genwqe, cd,
                                            genwqe_attribute_groups,
                                            GENWQE_DEVNAME "%u_card",
                                            cd->card_idx);
        if (IS_ERR(cd->dev)) {
                rc = PTR_ERR(cd->dev);
                goto err_cdev;
        }

        rc = genwqe_init_debugfs(cd);
        if (rc != 0)
                goto err_debugfs;

        return 0;

 err_debugfs:
        device_destroy(cd->class_genwqe, cd->devnum_genwqe);
 err_cdev:
        cdev_del(&cd->cdev_genwqe);
 err_add:
        unregister_chrdev_region(cd->devnum_genwqe, GENWQE_MAX_MINOR);
 err_dev:
        cd->dev = NULL;
        return rc;
}

static int genwqe_inform_and_stop_processes(struct genwqe_dev *cd)
{
        int rc;
        unsigned int i;
        struct pci_dev *pci_dev = cd->pci_dev;

        if (!genwqe_open_files(cd))
                return 0;

        dev_warn(&pci_dev->dev, "[%s] send SIGIO and wait ...\n", __func__);

        rc = genwqe_kill_fasync(cd, SIGIO);
        if (rc > 0) {
                /* give kill_timeout seconds to close file descriptors ... */
                for (i = 0; (i < GENWQE_KILL_TIMEOUT) &&
                             genwqe_open_files(cd); i++) {
                        dev_info(&pci_dev->dev, "  %d sec ...", i);

                        cond_resched();
                        msleep(1000);
                }

                /* if no open files we can safely continue, else ... */
                if (!genwqe_open_files(cd))
                        return 0;

                dev_warn(&pci_dev->dev,
                         "[%s] send SIGKILL and wait ...\n", __func__);

                rc = genwqe_terminate(cd);
                if (rc) {
                        /* Give kill_timeout more seconds to end processes */
                        for (i = 0; (i < GENWQE_KILL_TIMEOUT) &&
                                     genwqe_open_files(cd); i++) {
                                dev_warn(&pci_dev->dev, "  %d sec ...", i);

                                cond_resched();
                                msleep(1000);
                        }
                }
        }
        return 0;
}

/**
 * genwqe_device_remove() - Remove genwqe's char device
 * @cd:      genwqe device descriptor
 *
 * This function must be called after the client devices are removed
 * because it will free the major/minor number range for the genwqe
 * drivers.
 *
 * This function must be robust enough to be called twice.
 */
int genwqe_device_remove(struct genwqe_dev *cd)
{
        int rc;
        struct pci_dev *pci_dev = cd->pci_dev;

        if (!genwqe_device_initialized(cd))
                return 1;

        genwqe_inform_and_stop_processes(cd);

        /*
         * We currently do wait until all file descriptors are
         * closed. This leads to a problem when we abort the
         * application which will decrease this reference from
         * 1/unused to 0/illegal and not from 2/used 1/empty.
         */
        rc = kref_read(&cd->cdev_genwqe.kobj.kref);
        if (rc != 1) {
                dev_err(&pci_dev->dev,
                        "[%s] err: cdev_genwqe...refcount=%d\n", __func__, rc);
                panic("Fatal err: cannot free resources with pending references!");
        }

        genqwe_exit_debugfs(cd);
        device_destroy(cd->class_genwqe, cd->devnum_genwqe);
        cdev_del(&cd->cdev_genwqe);
        unregister_chrdev_region(cd->devnum_genwqe, GENWQE_MAX_MINOR);
        cd->dev = NULL;

        return 0;
}