// SPDX-License-Identifier: GPL-2.0-only
/*
 * IBM Accelerator Family 'GenWQE'
 *
 * (C) Copyright IBM Corp. 2013
 *
 * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
 * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
 * Author: Michael Jung <mijung@gmx.net>
 * Author: Michael Ruettger <michael@ibmra.de>
 */

/*
 * Character device representation of the GenWQE device. This allows
 * user-space applications to communicate with the card.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/atomic.h>

#include "card_base.h"
#include "card_ddcb.h"

static int genwqe_open_files(struct genwqe_dev *cd)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&cd->file_lock, flags);
	rc = list_empty(&cd->file_list);
	spin_unlock_irqrestore(&cd->file_lock, flags);
	return !rc;
}

static void genwqe_add_file(struct genwqe_dev *cd, struct genwqe_file *cfile)
{
	unsigned long flags;

	cfile->opener = get_pid(task_tgid(current));
	spin_lock_irqsave(&cd->file_lock, flags);
	list_add(&cfile->list, &cd->file_list);
	spin_unlock_irqrestore(&cd->file_lock, flags);
}

static int genwqe_del_file(struct genwqe_dev *cd, struct genwqe_file *cfile)
{
	unsigned long flags;

	spin_lock_irqsave(&cd->file_lock, flags);
	list_del(&cfile->list);
	spin_unlock_irqrestore(&cd->file_lock, flags);
	put_pid(cfile->opener);

	return 0;
}

static void genwqe_add_pin(struct genwqe_file *cfile, struct dma_mapping *m)
{
	unsigned long flags;

	spin_lock_irqsave(&cfile->pin_lock, flags);
	list_add(&m->pin_list, &cfile->pin_list);
	spin_unlock_irqrestore(&cfile->pin_lock, flags);
}

static int genwqe_del_pin(struct genwqe_file *cfile, struct dma_mapping *m)
{
	unsigned long flags;

	spin_lock_irqsave(&cfile->pin_lock, flags);
	list_del(&m->pin_list);
	spin_unlock_irqrestore(&cfile->pin_lock, flags);

	return 0;
}

/**
 * genwqe_search_pin() - Search for the mapping for a userspace address
 * @cfile:	Descriptor of opened file
 * @u_addr:	User virtual address
 * @size:	Size of buffer
 * @virt_addr:	Virtual address to be updated
 *
 * Return: Pointer to the corresponding mapping, or NULL if not found
 */
static struct dma_mapping *genwqe_search_pin(struct genwqe_file *cfile,
					    unsigned long u_addr,
					    unsigned int size,
					    void **virt_addr)
{
	unsigned long flags;
	struct dma_mapping *m;

	spin_lock_irqsave(&cfile->pin_lock, flags);

	list_for_each_entry(m, &cfile->pin_list, pin_list) {
		if ((((u64)m->u_vaddr) <= (u_addr)) &&
		    (((u64)m->u_vaddr + m->size) >= (u_addr + size))) {

			if (virt_addr)
				*virt_addr = m->k_vaddr +
					(u_addr - (u64)m->u_vaddr);

			spin_unlock_irqrestore(&cfile->pin_lock, flags);
			return m;
		}
	}
	spin_unlock_irqrestore(&cfile->pin_lock, flags);
	return NULL;
}

static void __genwqe_add_mapping(struct genwqe_file *cfile,
			      struct dma_mapping *dma_map)
{
	unsigned long flags;

	spin_lock_irqsave(&cfile->map_lock, flags);
	list_add(&dma_map->card_list, &cfile->map_list);
	spin_unlock_irqrestore(&cfile->map_lock, flags);
}

static void __genwqe_del_mapping(struct genwqe_file *cfile,
			      struct dma_mapping *dma_map)
{
	unsigned long flags;

	spin_lock_irqsave(&cfile->map_lock, flags);
	list_del(&dma_map->card_list);
	spin_unlock_irqrestore(&cfile->map_lock, flags);
}


/**
 * __genwqe_search_mapping() - Search for the mapping for a userspace address
 * @cfile:	descriptor of opened file
 * @u_addr:	user virtual address
 * @size:	size of buffer
 * @dma_addr:	DMA address to be updated
 * @virt_addr:	Virtual address to be updated
 *
 * Return: Pointer to the corresponding mapping, or NULL if not found
 */
static struct dma_mapping *__genwqe_search_mapping(struct genwqe_file *cfile,
						   unsigned long u_addr,
						   unsigned int size,
						   dma_addr_t *dma_addr,
						   void **virt_addr)
{
	unsigned long flags;
	struct dma_mapping *m;
	struct pci_dev *pci_dev = cfile->cd->pci_dev;

	spin_lock_irqsave(&cfile->map_lock, flags);
	list_for_each_entry(m, &cfile->map_list, card_list) {

		if ((((u64)m->u_vaddr) <= (u_addr)) &&
		    (((u64)m->u_vaddr + m->size) >= (u_addr + size))) {

			/* match found: current is as expected and
			   addr is in range */
			if (dma_addr)
				*dma_addr = m->dma_addr +
					(u_addr - (u64)m->u_vaddr);

			if (virt_addr)
				*virt_addr = m->k_vaddr +
					(u_addr - (u64)m->u_vaddr);

			spin_unlock_irqrestore(&cfile->map_lock, flags);
			return m;
		}
	}
	spin_unlock_irqrestore(&cfile->map_lock, flags);

	dev_err(&pci_dev->dev,
		"[%s] Entry not found: u_addr=%lx, size=%x\n",
		__func__, u_addr, size);

	return NULL;
}

static void genwqe_remove_mappings(struct genwqe_file *cfile)
{
	int i = 0;
	struct list_head *node, *next;
	struct dma_mapping *dma_map;
	struct genwqe_dev *cd = cfile->cd;
	struct pci_dev *pci_dev = cfile->cd->pci_dev;

	list_for_each_safe(node, next, &cfile->map_list) {
		dma_map = list_entry(node, struct dma_mapping, card_list);

		list_del_init(&dma_map->card_list);

		/*
		 * This is really a bug, because those things should
		 * have been already tidied up.
		 *
		 * GENWQE_MAPPING_RAW should have been removed via munmap().
		 * GENWQE_MAPPING_SGL_TEMP should be removed by tidy up code.
		 */
		dev_err(&pci_dev->dev,
			"[%s] %d. cleanup mapping: u_vaddr=%p u_kaddr=%016lx dma_addr=%lx\n",
			__func__, i++, dma_map->u_vaddr,
			(unsigned long)dma_map->k_vaddr,
			(unsigned long)dma_map->dma_addr);

		if (dma_map->type == GENWQE_MAPPING_RAW) {
			/* we allocated this dynamically */
			__genwqe_free_consistent(cd, dma_map->size,
						dma_map->k_vaddr,
						dma_map->dma_addr);
			kfree(dma_map);
		} else if (dma_map->type == GENWQE_MAPPING_SGL_TEMP) {
			/* we use dma_map statically from the request */
			genwqe_user_vunmap(cd, dma_map);
		}
	}
}

static void genwqe_remove_pinnings(struct genwqe_file *cfile)
{
	struct list_head *node, *next;
	struct dma_mapping *dma_map;
	struct genwqe_dev *cd = cfile->cd;

	list_for_each_safe(node, next, &cfile->pin_list) {
		dma_map = list_entry(node, struct dma_mapping, pin_list);

		/*
		 * This is not a bug, because a killed process might
		 * not call the unpin ioctl, which is supposed to free
		 * the resources.
		 *
		 * Pinnings are dynamically allocated and need to be
		 * deleted.
		 */
		list_del_init(&dma_map->pin_list);
		genwqe_user_vunmap(cd, dma_map);
		kfree(dma_map);
	}
}

/**
 * genwqe_kill_fasync() - Send signal to all processes with open GenWQE files
 * @cd: GenWQE device information
 * @sig: Signal to send out
 *
 * E.g. genwqe_kill_fasync(cd, SIGIO);
 */
static int genwqe_kill_fasync(struct genwqe_dev *cd, int sig)
{
	unsigned int files = 0;
	unsigned long flags;
	struct genwqe_file *cfile;

	spin_lock_irqsave(&cd->file_lock, flags);
	list_for_each_entry(cfile, &cd->file_list, list) {
		if (cfile->async_queue)
			kill_fasync(&cfile->async_queue, sig, POLL_HUP);
		files++;
	}
	spin_unlock_irqrestore(&cd->file_lock, flags);
	return files;
}

static int genwqe_terminate(struct genwqe_dev *cd)
{
	unsigned int files = 0;
	unsigned long flags;
	struct genwqe_file *cfile;

	spin_lock_irqsave(&cd->file_lock, flags);
	list_for_each_entry(cfile, &cd->file_list, list) {
		kill_pid(cfile->opener, SIGKILL, 1);
		files++;
	}
	spin_unlock_irqrestore(&cd->file_lock, flags);
	return files;
}

/**
 * genwqe_open() - file open
 * @inode:      file system information
 * @filp:	file handle
 *
 * This function is executed whenever an application calls
 * open("/dev/genwqe",..).
 *
 * Return: 0 if successful or <0 if errors
 */
static int genwqe_open(struct inode *inode, struct file *filp)
{
	struct genwqe_dev *cd;
	struct genwqe_file *cfile;

	cfile = kzalloc(sizeof(*cfile), GFP_KERNEL);
	if (cfile == NULL)
		return -ENOMEM;

	cd = container_of(inode->i_cdev, struct genwqe_dev, cdev_genwqe);
	cfile->cd = cd;
	cfile->filp = filp;
	cfile->client = NULL;

	spin_lock_init(&cfile->map_lock);  /* list of raw memory allocations */
	INIT_LIST_HEAD(&cfile->map_list);

	spin_lock_init(&cfile->pin_lock);  /* list of user pinned memory */
	INIT_LIST_HEAD(&cfile->pin_list);

	filp->private_data = cfile;

	genwqe_add_file(cd, cfile);
	return 0;
}

/**
 * genwqe_fasync() - Setup process to receive SIGIO.
 * @fd:        file descriptor
 * @filp:      file handle
 * @mode:      file mode
 *
 * Sending a signal works as follows:
 *
 * if (cdev->async_queue)
 *         kill_fasync(&cdev->async_queue, SIGIO, POLL_IN);
 *
 * Some devices also implement asynchronous notification to indicate
 * when the device can be written; in this case, of course,
 * kill_fasync must be called with a mode of POLL_OUT.
 */
static int genwqe_fasync(int fd, struct file *filp, int mode)
{
	struct genwqe_file *cdev = (struct genwqe_file *)filp->private_data;

	return fasync_helper(fd, filp, mode, &cdev->async_queue);
}
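
/*
 * A minimal user-space sketch (illustrative only, not part of this
 * driver) of how a process registers for the SIGIO notification
 * described above. The device path and handler name are assumptions;
 * the actual node name depends on the card index.
 *
 *   static void sigio_handler(int sig)
 *   {
 *           card_asked_us_to_close = 1;
 *   }
 *
 *   int fd = open("/dev/genwqe0_card", O_RDWR);
 *
 *   signal(SIGIO, sigio_handler);
 *   fcntl(fd, F_SETOWN, getpid());
 *   fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | FASYNC);
 */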


/**
 * genwqe_release() - file close
 * @inode:      file system information
 * @filp:       file handle
 *
 * This function is executed whenever an application calls 'close(fd_genwqe)'
 *
 * Return: always 0
 */
static int genwqe_release(struct inode *inode, struct file *filp)
{
	struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data;
	struct genwqe_dev *cd = cfile->cd;

	/* there must be no entries in these lists! */
	genwqe_remove_mappings(cfile);
	genwqe_remove_pinnings(cfile);

	/* remove this filp from the asynchronously notified filp's */
	genwqe_fasync(-1, filp, 0);

	/*
	 * For this to work we must not release cd when this cfile is
	 * not yet released, otherwise the list entry is invalid,
	 * because the list itself gets reinstantiated!
	 */
	genwqe_del_file(cd, cfile);
	kfree(cfile);
	return 0;
}

static void genwqe_vma_open(struct vm_area_struct *vma)
{
	/* nothing ... */
}

/**
 * genwqe_vma_close() - Called each time when vma is unmapped
 * @vma: VMA area to close
 *
 * Free memory which got allocated by GenWQE mmap().
 */
static void genwqe_vma_close(struct vm_area_struct *vma)
{
	unsigned long vsize = vma->vm_end - vma->vm_start;
	struct inode *inode = file_inode(vma->vm_file);
	struct dma_mapping *dma_map;
	struct genwqe_dev *cd = container_of(inode->i_cdev, struct genwqe_dev,
					    cdev_genwqe);
	struct pci_dev *pci_dev = cd->pci_dev;
	dma_addr_t d_addr = 0;
	struct genwqe_file *cfile = vma->vm_private_data;

	dma_map = __genwqe_search_mapping(cfile, vma->vm_start, vsize,
					 &d_addr, NULL);
	if (dma_map == NULL) {
		dev_err(&pci_dev->dev,
			"  [%s] err: mapping not found: v=%lx, p=%lx s=%lx\n",
			__func__, vma->vm_start, vma->vm_pgoff << PAGE_SHIFT,
			vsize);
		return;
	}
	__genwqe_del_mapping(cfile, dma_map);
	__genwqe_free_consistent(cd, dma_map->size, dma_map->k_vaddr,
				 dma_map->dma_addr);
	kfree(dma_map);
}

static const struct vm_operations_struct genwqe_vma_ops = {
	.open   = genwqe_vma_open,
	.close  = genwqe_vma_close,
};

/**
 * genwqe_mmap() - Provide contiguous buffers to userspace
 * @filp:	File pointer (unused)
 * @vma:	VMA area to map
 *
 * We use mmap() to allocate contiguous buffers used for DMA
 * transfers. After the buffer is allocated we remap it to user-space
 * and remember a reference to our dma_mapping data structure, where
 * we store the associated DMA address and allocated size.
 *
 * When we receive a DDCB execution request with the ATS bits set to
 * plain buffer, we look up our dma_mapping list to find the
 * corresponding DMA address for the associated user-space address.
 */
static int genwqe_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int rc;
	unsigned long pfn, vsize = vma->vm_end - vma->vm_start;
	struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data;
	struct genwqe_dev *cd = cfile->cd;
	struct dma_mapping *dma_map;

	if (vsize == 0)
		return -EINVAL;

	if (get_order(vsize) > MAX_PAGE_ORDER)
		return -ENOMEM;

	dma_map = kzalloc(sizeof(struct dma_mapping), GFP_KERNEL);
	if (dma_map == NULL)
		return -ENOMEM;

	genwqe_mapping_init(dma_map, GENWQE_MAPPING_RAW);
	dma_map->u_vaddr = (void *)vma->vm_start;
	dma_map->size = vsize;
	dma_map->nr_pages = DIV_ROUND_UP(vsize, PAGE_SIZE);
	dma_map->k_vaddr = __genwqe_alloc_consistent(cd, vsize,
						     &dma_map->dma_addr);
	if (dma_map->k_vaddr == NULL) {
		rc = -ENOMEM;
		goto free_dma_map;
	}

	if (capable(CAP_SYS_ADMIN) && (vsize > sizeof(dma_addr_t)))
		*(dma_addr_t *)dma_map->k_vaddr = dma_map->dma_addr;

	pfn = virt_to_phys(dma_map->k_vaddr) >> PAGE_SHIFT;
	rc = remap_pfn_range(vma,
			     vma->vm_start,
			     pfn,
			     vsize,
			     vma->vm_page_prot);
	if (rc != 0) {
		rc = -EFAULT;
		goto free_dma_mem;
	}

	vma->vm_private_data = cfile;
	vma->vm_ops = &genwqe_vma_ops;
	__genwqe_add_mapping(cfile, dma_map);

	return 0;

 free_dma_mem:
	__genwqe_free_consistent(cd, dma_map->size,
				dma_map->k_vaddr,
				dma_map->dma_addr);
 free_dma_map:
	kfree(dma_map);
	return rc;
}
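
/*
 * A minimal user-space sketch (illustrative only, not part of this
 * driver) of allocating such a DMA-capable buffer. The device path and
 * buffer size are assumptions.
 *
 *   int fd = open("/dev/genwqe0_card", O_RDWR);
 *   size_t size = 2 * 1024 * 1024;
 *
 *   void *buf = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *                    MAP_SHARED, fd, 0);
 *   if (buf == MAP_FAILED)
 *           return -1;
 *
 *   ...                        use buf as plain data buffer in DDCBs
 *   munmap(buf, size);         triggers genwqe_vma_close() above
 */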

#define	FLASH_BLOCK	0x40000	/* we use 256k blocks */

/**
 * do_flash_update() - Execute flash update (write image or VPD)
 * @cfile:	Descriptor of opened file
 * @load:	details about image load
 *
 * Return: 0 if successful
 */
static int do_flash_update(struct genwqe_file *cfile,
			   struct genwqe_bitstream *load)
{
	int rc = 0;
	int blocks_to_flash;
	dma_addr_t dma_addr;
	u64 flash = 0;
	size_t tocopy = 0;
	u8 __user *buf;
	u8 *xbuf;
	u32 crc;
	u8 cmdopts;
	struct genwqe_dev *cd = cfile->cd;
	struct file *filp = cfile->filp;
	struct pci_dev *pci_dev = cd->pci_dev;

	if ((load->size & 0x3) != 0)
		return -EINVAL;

	if (((unsigned long)(load->data_addr) & ~PAGE_MASK) != 0)
		return -EINVAL;

	/* FIXME Bits have changed for new service layer! */
	switch ((char)load->partition) {
	case '0':
		cmdopts = 0x14;
		break;		/* download/erase_first/part_0 */
	case '1':
		cmdopts = 0x1C;
		break;		/* download/erase_first/part_1 */
	case 'v':
		cmdopts = 0x0C;
		break;		/* download/erase_first/vpd */
	default:
		return -EINVAL;
	}

	buf = (u8 __user *)load->data_addr;
	xbuf = __genwqe_alloc_consistent(cd, FLASH_BLOCK, &dma_addr);
	if (xbuf == NULL)
		return -ENOMEM;

	blocks_to_flash = load->size / FLASH_BLOCK;
	while (load->size) {
		struct genwqe_ddcb_cmd *req;

		/*
		 * We must be 4 byte aligned. Buffer must be zero padded
		 * to have defined values when calculating CRC.
		 */
		tocopy = min_t(size_t, load->size, FLASH_BLOCK);

		rc = copy_from_user(xbuf, buf, tocopy);
		if (rc) {
			rc = -EFAULT;
			goto free_buffer;
		}
		crc = genwqe_crc32(xbuf, tocopy, 0xffffffff);

		dev_dbg(&pci_dev->dev,
			"[%s] DMA: %lx CRC: %08x SZ: %ld %d\n",
			__func__, (unsigned long)dma_addr, crc, tocopy,
			blocks_to_flash);

		/* prepare DDCB for SLU process */
		req = ddcb_requ_alloc();
		if (req == NULL) {
			rc = -ENOMEM;
			goto free_buffer;
		}

		req->cmd = SLCMD_MOVE_FLASH;
		req->cmdopts = cmdopts;

		/* prepare invariant values */
		if (genwqe_get_slu_id(cd) <= 0x2) {
			*(__be64 *)&req->__asiv[0]  = cpu_to_be64(dma_addr);
			*(__be64 *)&req->__asiv[8]  = cpu_to_be64(tocopy);
			*(__be64 *)&req->__asiv[16] = cpu_to_be64(flash);
			*(__be32 *)&req->__asiv[24] = cpu_to_be32(0);
			req->__asiv[24]	       = load->uid;
			*(__be32 *)&req->__asiv[28] = cpu_to_be32(crc);

			/* for simulation only */
			*(__be64 *)&req->__asiv[88] = cpu_to_be64(load->slu_id);
			*(__be64 *)&req->__asiv[96] = cpu_to_be64(load->app_id);
			req->asiv_length = 32; /* bytes included in crc calc */
		} else {	/* setup DDCB for ATS architecture */
			*(__be64 *)&req->asiv[0]  = cpu_to_be64(dma_addr);
			*(__be32 *)&req->asiv[8]  = cpu_to_be32(tocopy);
			*(__be32 *)&req->asiv[12] = cpu_to_be32(0); /* resvd */
			*(__be64 *)&req->asiv[16] = cpu_to_be64(flash);
			*(__be32 *)&req->asiv[24] = cpu_to_be32(load->uid<<24);
			*(__be32 *)&req->asiv[28] = cpu_to_be32(crc);

			/* for simulation only */
			*(__be64 *)&req->asiv[80] = cpu_to_be64(load->slu_id);
			*(__be64 *)&req->asiv[88] = cpu_to_be64(load->app_id);

			/* Rd only */
			req->ats = 0x4ULL << 44;
			req->asiv_length = 40; /* bytes included in crc calc */
		}
		req->asv_length  = 8;

		/* For Genwqe5 we get back the calculated CRC */
		*(u64 *)&req->asv[0] = 0ULL;			/* 0x80 */

		rc = __genwqe_execute_raw_ddcb(cd, req, filp->f_flags);

		load->retc = req->retc;
		load->attn = req->attn;
		load->progress = req->progress;

		if (rc < 0) {
			ddcb_requ_free(req);
			goto free_buffer;
		}

		if (req->retc != DDCB_RETC_COMPLETE) {
			rc = -EIO;
			ddcb_requ_free(req);
			goto free_buffer;
		}

		load->size  -= tocopy;
		flash += tocopy;
		buf += tocopy;
		blocks_to_flash--;
		ddcb_requ_free(req);
	}

 free_buffer:
	__genwqe_free_consistent(cd, FLASH_BLOCK, xbuf, dma_addr);
	return rc;
}
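
/*
 * Illustrative user-space sketch (a hypothetical example, not part of
 * this driver) of the GENWQE_SLU_UPDATE path implemented above. The
 * image buffer must be page aligned and its size a multiple of 4 bytes;
 * only fields used by do_flash_update() are shown and the values are
 * placeholders.
 *
 *   struct genwqe_bitstream load;
 *
 *   memset(&load, 0, sizeof(load));
 *   load.data_addr = (unsigned long)image_buf;
 *   load.size      = image_size;
 *   load.partition = '0';                  target partition 0
 *
 *   rc = ioctl(fd, GENWQE_SLU_UPDATE, &load);
 *   if (rc != 0)
 *           handle_error(load.retc, load.attn, load.progress);
 */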

static int do_flash_read(struct genwqe_file *cfile,
			 struct genwqe_bitstream *load)
{
	int rc, blocks_to_flash;
	dma_addr_t dma_addr;
	u64 flash = 0;
	size_t tocopy = 0;
	u8 __user *buf;
	u8 *xbuf;
	u8 cmdopts;
	struct genwqe_dev *cd = cfile->cd;
	struct file *filp = cfile->filp;
	struct pci_dev *pci_dev = cd->pci_dev;
	struct genwqe_ddcb_cmd *cmd;

	if ((load->size & 0x3) != 0)
		return -EINVAL;

	if (((unsigned long)(load->data_addr) & ~PAGE_MASK) != 0)
		return -EINVAL;

	/* FIXME Bits have changed for new service layer! */
	switch ((char)load->partition) {
	case '0':
		cmdopts = 0x12;
		break;		/* upload/part_0 */
	case '1':
		cmdopts = 0x1A;
		break;		/* upload/part_1 */
	case 'v':
		cmdopts = 0x0A;
		break;		/* upload/vpd */
	default:
		return -EINVAL;
	}

	buf = (u8 __user *)load->data_addr;
	xbuf = __genwqe_alloc_consistent(cd, FLASH_BLOCK, &dma_addr);
	if (xbuf == NULL)
		return -ENOMEM;

	blocks_to_flash = load->size / FLASH_BLOCK;
	while (load->size) {
		/*
		 * We must be 4 byte aligned. Buffer must be zero padded
		 * to have defined values when calculating CRC.
		 */
		tocopy = min_t(size_t, load->size, FLASH_BLOCK);

		dev_dbg(&pci_dev->dev,
			"[%s] DMA: %lx SZ: %ld %d\n",
			__func__, (unsigned long)dma_addr, tocopy,
			blocks_to_flash);

		/* prepare DDCB for SLU process */
		cmd = ddcb_requ_alloc();
		if (cmd == NULL) {
			rc = -ENOMEM;
			goto free_buffer;
		}
		cmd->cmd = SLCMD_MOVE_FLASH;
		cmd->cmdopts = cmdopts;

		/* prepare invariant values */
		if (genwqe_get_slu_id(cd) <= 0x2) {
			*(__be64 *)&cmd->__asiv[0]  = cpu_to_be64(dma_addr);
			*(__be64 *)&cmd->__asiv[8]  = cpu_to_be64(tocopy);
			*(__be64 *)&cmd->__asiv[16] = cpu_to_be64(flash);
			*(__be32 *)&cmd->__asiv[24] = cpu_to_be32(0);
			cmd->__asiv[24] = load->uid;
			*(__be32 *)&cmd->__asiv[28] = cpu_to_be32(0) /* CRC */;
			cmd->asiv_length = 32; /* bytes included in crc calc */
		} else {	/* setup DDCB for ATS architecture */
			*(__be64 *)&cmd->asiv[0]  = cpu_to_be64(dma_addr);
			*(__be32 *)&cmd->asiv[8]  = cpu_to_be32(tocopy);
			*(__be32 *)&cmd->asiv[12] = cpu_to_be32(0); /* resvd */
			*(__be64 *)&cmd->asiv[16] = cpu_to_be64(flash);
			*(__be32 *)&cmd->asiv[24] = cpu_to_be32(load->uid<<24);
			*(__be32 *)&cmd->asiv[28] = cpu_to_be32(0); /* CRC */

			/* rd/wr */
			cmd->ats = 0x5ULL << 44;
			cmd->asiv_length = 40; /* bytes included in crc calc */
		}
		cmd->asv_length  = 8;

		/* we only get back the calculated CRC */
		*(u64 *)&cmd->asv[0] = 0ULL;	/* 0x80 */

		rc = __genwqe_execute_raw_ddcb(cd, cmd, filp->f_flags);

		load->retc = cmd->retc;
		load->attn = cmd->attn;
		load->progress = cmd->progress;

		if ((rc < 0) && (rc != -EBADMSG)) {
			ddcb_requ_free(cmd);
			goto free_buffer;
		}

		rc = copy_to_user(buf, xbuf, tocopy);
		if (rc) {
			rc = -EFAULT;
			ddcb_requ_free(cmd);
			goto free_buffer;
		}

		/* We know that we can get retc 0x104 with CRC err */
		if (((cmd->retc == DDCB_RETC_FAULT) &&
		     (cmd->attn != 0x02)) ||  /* Normally ignore CRC error */
		    ((cmd->retc == DDCB_RETC_COMPLETE) &&
		     (cmd->attn != 0x00))) {  /* Everything was fine */
			rc = -EIO;
			ddcb_requ_free(cmd);
			goto free_buffer;
		}

		load->size  -= tocopy;
		flash += tocopy;
		buf += tocopy;
		blocks_to_flash--;
		ddcb_requ_free(cmd);
	}
	rc = 0;

 free_buffer:
	__genwqe_free_consistent(cd, FLASH_BLOCK, xbuf, dma_addr);
	return rc;
}

static int genwqe_pin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
{
	int rc;
	struct genwqe_dev *cd = cfile->cd;
	struct pci_dev *pci_dev = cfile->cd->pci_dev;
	struct dma_mapping *dma_map;
	unsigned long map_addr;
	unsigned long map_size;

	if ((m->addr == 0x0) || (m->size == 0))
		return -EINVAL;
	if (m->size > ULONG_MAX - PAGE_SIZE - (m->addr & ~PAGE_MASK))
		return -EINVAL;

	map_addr = (m->addr & PAGE_MASK);
	map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE);

	dma_map = kzalloc(sizeof(struct dma_mapping), GFP_KERNEL);
	if (dma_map == NULL)
		return -ENOMEM;

	genwqe_mapping_init(dma_map, GENWQE_MAPPING_SGL_PINNED);
	rc = genwqe_user_vmap(cd, dma_map, (void *)map_addr, map_size);
	if (rc != 0) {
		dev_err(&pci_dev->dev,
			"[%s] genwqe_user_vmap rc=%d\n", __func__, rc);
		kfree(dma_map);
		return rc;
	}

	genwqe_add_pin(cfile, dma_map);
	return 0;
}

static int genwqe_unpin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
{
	struct genwqe_dev *cd = cfile->cd;
	struct dma_mapping *dma_map;
	unsigned long map_addr;
	unsigned long map_size;

	if (m->addr == 0x0)
		return -EINVAL;

	map_addr = (m->addr & PAGE_MASK);
	map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE);

	dma_map = genwqe_search_pin(cfile, map_addr, map_size, NULL);
	if (dma_map == NULL)
		return -ENOENT;

	genwqe_del_pin(cfile, dma_map);
	genwqe_user_vunmap(cd, dma_map);
	kfree(dma_map);
	return 0;
}
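
/*
 * Illustrative user-space sketch (not part of this driver) of pinning a
 * buffer so that repeated DDCB executions can reuse the DMA mapping.
 * Only the addr and size fields used above are shown; the buffer
 * pointer and its size are assumptions.
 *
 *   struct genwqe_mem m;
 *
 *   memset(&m, 0, sizeof(m));
 *   m.addr = (unsigned long)buf;
 *   m.size = buf_size;
 *
 *   rc = ioctl(fd, GENWQE_PIN_MEM, &m);
 *   ...                              issue DDCBs referencing buf
 *   rc = ioctl(fd, GENWQE_UNPIN_MEM, &m);
 */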

/**
 * ddcb_cmd_cleanup() - Remove dynamically created fixup entries
 * @cfile:	Descriptor of opened file
 * @req:	DDCB work request
 *
 * Only if there are any. Pinnings are not removed.
 */
static int ddcb_cmd_cleanup(struct genwqe_file *cfile, struct ddcb_requ *req)
{
	unsigned int i;
	struct dma_mapping *dma_map;
	struct genwqe_dev *cd = cfile->cd;

	for (i = 0; i < DDCB_FIXUPS; i++) {
		dma_map = &req->dma_mappings[i];

		if (dma_mapping_used(dma_map)) {
			__genwqe_del_mapping(cfile, dma_map);
			genwqe_user_vunmap(cd, dma_map);
		}
		if (req->sgls[i].sgl != NULL)
			genwqe_free_sync_sgl(cd, &req->sgls[i]);
	}
	return 0;
}

/**
 * ddcb_cmd_fixups() - Establish DMA fixups/sglists for user memory references
 * @cfile:	Descriptor of opened file
 * @req:	DDCB work request
 *
 * Before the DDCB gets executed we need to handle the fixups. We
 * replace the user-space addresses with DMA addresses or do
 * additional setup work e.g. generating a scatter-gather list which
 * is used to describe the memory referred to in the fixup.
 */
static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req)
{
	int rc;
	unsigned int asiv_offs, i;
	struct genwqe_dev *cd = cfile->cd;
	struct genwqe_ddcb_cmd *cmd = &req->cmd;
	struct dma_mapping *m;

	for (i = 0, asiv_offs = 0x00; asiv_offs <= 0x58;
	     i++, asiv_offs += 0x08) {

		u64 u_addr;
		dma_addr_t d_addr;
		u32 u_size = 0;
		u64 ats_flags;

		ats_flags = ATS_GET_FLAGS(cmd->ats, asiv_offs);

		switch (ats_flags) {

		case ATS_TYPE_DATA:
			break;	/* nothing to do here */

		case ATS_TYPE_FLAT_RDWR:
		case ATS_TYPE_FLAT_RD: {
			u_addr = be64_to_cpu(*((__be64 *)&cmd->
					       asiv[asiv_offs]));
			u_size = be32_to_cpu(*((__be32 *)&cmd->
					       asiv[asiv_offs + 0x08]));

			/*
			 * No data available. Ignore u_addr in this
			 * case and set addr to 0. Hardware must not
			 * fetch the buffer.
			 */
			if (u_size == 0x0) {
				*((__be64 *)&cmd->asiv[asiv_offs]) =
					cpu_to_be64(0x0);
				break;
			}

			m = __genwqe_search_mapping(cfile, u_addr, u_size,
						   &d_addr, NULL);
			if (m == NULL) {
				rc = -EFAULT;
				goto err_out;
			}

			*((__be64 *)&cmd->asiv[asiv_offs]) =
				cpu_to_be64(d_addr);
			break;
		}

		case ATS_TYPE_SGL_RDWR:
		case ATS_TYPE_SGL_RD: {
			int page_offs;

			u_addr = be64_to_cpu(*((__be64 *)
					       &cmd->asiv[asiv_offs]));
			u_size = be32_to_cpu(*((__be32 *)
					       &cmd->asiv[asiv_offs + 0x08]));

			/*
			 * No data available. Ignore u_addr in this
			 * case and set addr to 0. Hardware must not
			 * fetch the empty sgl.
			 */
			if (u_size == 0x0) {
				*((__be64 *)&cmd->asiv[asiv_offs]) =
					cpu_to_be64(0x0);
				break;
			}

			m = genwqe_search_pin(cfile, u_addr, u_size, NULL);
			if (m != NULL) {
				page_offs = (u_addr -
					     (u64)m->u_vaddr)/PAGE_SIZE;
			} else {
				m = &req->dma_mappings[i];

				genwqe_mapping_init(m,
						    GENWQE_MAPPING_SGL_TEMP);

				if (ats_flags == ATS_TYPE_SGL_RD)
					m->write = 0;

				rc = genwqe_user_vmap(cd, m, (void *)u_addr,
						      u_size);
				if (rc != 0)
					goto err_out;

				__genwqe_add_mapping(cfile, m);
				page_offs = 0;
			}

			/* create genwqe style scatter gather list */
			rc = genwqe_alloc_sync_sgl(cd, &req->sgls[i],
						   (void __user *)u_addr,
						   u_size, m->write);
			if (rc != 0)
				goto err_out;

			genwqe_setup_sgl(cd, &req->sgls[i],
					 &m->dma_list[page_offs]);

			*((__be64 *)&cmd->asiv[asiv_offs]) =
				cpu_to_be64(req->sgls[i].sgl_dma_addr);

			break;
		}
		default:
			rc = -EINVAL;
			goto err_out;
		}
	}
	return 0;

 err_out:
	ddcb_cmd_cleanup(cfile, req);
	return rc;
}

/**
 * genwqe_execute_ddcb() - Execute DDCB using userspace address fixups
 * @cfile:	Descriptor of opened file
 * @cmd:	Command identifier (passed from user)
 *
 * The code will build up the translation tables or look up the
 * contiguous memory allocation table to find the right translations
 * and DMA addresses.
 */
static int genwqe_execute_ddcb(struct genwqe_file *cfile,
			       struct genwqe_ddcb_cmd *cmd)
{
	int rc;
	struct genwqe_dev *cd = cfile->cd;
	struct file *filp = cfile->filp;
	struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd);

	rc = ddcb_cmd_fixups(cfile, req);
	if (rc != 0)
		return rc;

	rc = __genwqe_execute_raw_ddcb(cd, cmd, filp->f_flags);
	ddcb_cmd_cleanup(cfile, req);
	return rc;
}

static int do_execute_ddcb(struct genwqe_file *cfile,
			   unsigned long arg, int raw)
{
	int rc;
	struct genwqe_ddcb_cmd *cmd;
	struct genwqe_dev *cd = cfile->cd;
	struct file *filp = cfile->filp;

	cmd = ddcb_requ_alloc();
	if (cmd == NULL)
		return -ENOMEM;

	if (copy_from_user(cmd, (void __user *)arg, sizeof(*cmd))) {
		ddcb_requ_free(cmd);
		return -EFAULT;
	}

	if (!raw)
		rc = genwqe_execute_ddcb(cfile, cmd);
	else
		rc = __genwqe_execute_raw_ddcb(cd, cmd, filp->f_flags);

	/* Copy back only the modified fields. Do not copy ASIV
	   back since the copy got modified by the driver. */
	if (copy_to_user((void __user *)arg, cmd,
			 sizeof(*cmd) - DDCB_ASIV_LENGTH)) {
		ddcb_requ_free(cmd);
		return -EFAULT;
	}

	ddcb_requ_free(cmd);
	return rc;
}
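
/*
 * Illustrative user-space sketch (not part of this driver) of submitting
 * a DDCB for execution via GENWQE_EXECUTE_DDCB. Which opcode, options
 * and ASIV layout are valid depends on the card's application firmware,
 * so the values below are placeholders.
 *
 *   struct genwqe_ddcb_cmd cmd;
 *
 *   memset(&cmd, 0, sizeof(cmd));
 *   cmd.cmd         = MY_APP_CMD;        application specific opcode
 *   cmd.asiv_length = ...;               bytes of ASIV actually used
 *   cmd.asv_length  = ...;               expected return bytes
 *   ...                                  fill cmd.asiv[] and cmd.ats
 *
 *   rc = ioctl(fd, GENWQE_EXECUTE_DDCB, &cmd);
 *   if (rc == 0 && cmd.retc == DDCB_RETC_COMPLETE)
 *           process_results(cmd.asv);
 */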

/**
 * genwqe_ioctl() - IO control
 * @filp:       file handle
 * @cmd:        command identifier (passed from user)
 * @arg:        argument (passed from user)
 *
 * Return: 0 on success
 */
static long genwqe_ioctl(struct file *filp, unsigned int cmd,
			 unsigned long arg)
{
	int rc = 0;
	struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data;
	struct genwqe_dev *cd = cfile->cd;
	struct pci_dev *pci_dev = cd->pci_dev;
	struct genwqe_reg_io __user *io;
	u64 val;
	u32 reg_offs;

	/* Return -EIO if card hit EEH */
	if (pci_channel_offline(pci_dev))
		return -EIO;

	if (_IOC_TYPE(cmd) != GENWQE_IOC_CODE)
		return -EINVAL;

	switch (cmd) {

	case GENWQE_GET_CARD_STATE:
		put_user(cd->card_state, (enum genwqe_card_state __user *)arg);
		return 0;

		/* Register access */
	case GENWQE_READ_REG64: {
		io = (struct genwqe_reg_io __user *)arg;

		if (get_user(reg_offs, &io->num))
			return -EFAULT;

		if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x7))
			return -EINVAL;

		val = __genwqe_readq(cd, reg_offs);
		put_user(val, &io->val64);
		return 0;
	}

	case GENWQE_WRITE_REG64: {
		io = (struct genwqe_reg_io __user *)arg;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
			return -EPERM;

		if (get_user(reg_offs, &io->num))
			return -EFAULT;

		if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x7))
			return -EINVAL;

		if (get_user(val, &io->val64))
			return -EFAULT;

		__genwqe_writeq(cd, reg_offs, val);
		return 0;
	}

	case GENWQE_READ_REG32: {
		io = (struct genwqe_reg_io __user *)arg;

		if (get_user(reg_offs, &io->num))
			return -EFAULT;

		if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x3))
			return -EINVAL;

		val = __genwqe_readl(cd, reg_offs);
		put_user(val, &io->val64);
		return 0;
	}

	case GENWQE_WRITE_REG32: {
		io = (struct genwqe_reg_io __user *)arg;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
			return -EPERM;

		if (get_user(reg_offs, &io->num))
			return -EFAULT;

		if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x3))
			return -EINVAL;

		if (get_user(val, &io->val64))
			return -EFAULT;

		__genwqe_writel(cd, reg_offs, val);
		return 0;
	}

		/* Flash update/reading */
	case GENWQE_SLU_UPDATE: {
		struct genwqe_bitstream load;

		if (!genwqe_is_privileged(cd))
			return -EPERM;

		if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
			return -EPERM;

		if (copy_from_user(&load, (void __user *)arg,
				   sizeof(load)))
			return -EFAULT;

		rc = do_flash_update(cfile, &load);

		if (copy_to_user((void __user *)arg, &load, sizeof(load)))
			return -EFAULT;

		return rc;
	}

	case GENWQE_SLU_READ: {
		struct genwqe_bitstream load;

		if (!genwqe_is_privileged(cd))
			return -EPERM;

		if (genwqe_flash_readback_fails(cd))
			return -ENOSPC;	 /* known to fail for old versions */

		if (copy_from_user(&load, (void __user *)arg, sizeof(load)))
			return -EFAULT;

		rc = do_flash_read(cfile, &load);

		if (copy_to_user((void __user *)arg, &load, sizeof(load)))
			return -EFAULT;

		return rc;
	}

		/* memory pinning and unpinning */
	case GENWQE_PIN_MEM: {
		struct genwqe_mem m;

		if (copy_from_user(&m, (void __user *)arg, sizeof(m)))
			return -EFAULT;

		return genwqe_pin_mem(cfile, &m);
	}

	case GENWQE_UNPIN_MEM: {
		struct genwqe_mem m;

		if (copy_from_user(&m, (void __user *)arg, sizeof(m)))
			return -EFAULT;

		return genwqe_unpin_mem(cfile, &m);
	}

		/* launch a DDCB and wait for completion */
	case GENWQE_EXECUTE_DDCB:
		return do_execute_ddcb(cfile, arg, 0);

	case GENWQE_EXECUTE_RAW_DDCB: {

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		return do_execute_ddcb(cfile, arg, 1);
	}

	default:
		return -EINVAL;
	}

	return rc;
}
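
/*
 * Illustrative user-space sketch (not part of this driver) of the
 * register access ioctls handled above. Reading a 64-bit MMIO register
 * only requires an 8-byte aligned offset within the MMIO window; the
 * offset shown is a placeholder.
 *
 *   struct genwqe_reg_io io;
 *
 *   io.num = 0x0;                        register offset, 8-byte aligned
 *   if (ioctl(fd, GENWQE_READ_REG64, &io) == 0)
 *           printf("reg 0x%llx = 0x%llx\n",
 *                  (unsigned long long)io.num,
 *                  (unsigned long long)io.val64);
 */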

static const struct file_operations genwqe_fops = {
	.owner		= THIS_MODULE,
	.open		= genwqe_open,
	.fasync		= genwqe_fasync,
	.mmap		= genwqe_mmap,
	.unlocked_ioctl	= genwqe_ioctl,
	.compat_ioctl   = compat_ptr_ioctl,
	.release	= genwqe_release,
};

static int genwqe_device_initialized(struct genwqe_dev *cd)
{
	return cd->dev != NULL;
}

/**
 * genwqe_device_create() - Create and configure genwqe char device
 * @cd:      genwqe device descriptor
 *
 * This function must be called before we create any more genwqe
 * character devices, because it is allocating the major and minor
 * number which are supposed to be used by the client drivers.
 */
int genwqe_device_create(struct genwqe_dev *cd)
{
	int rc;
	struct pci_dev *pci_dev = cd->pci_dev;

	/*
	 * Here starts the individual setup per client. It must
	 * initialize its own cdev data structure with its own fops.
	 * The appropriate devnum needs to be created. The ranges must
	 * not overlap.
	 */
	rc = alloc_chrdev_region(&cd->devnum_genwqe, 0,
				 GENWQE_MAX_MINOR, GENWQE_DEVNAME);
	if (rc < 0) {
		dev_err(&pci_dev->dev, "err: alloc_chrdev_region failed\n");
		goto err_dev;
	}

	cdev_init(&cd->cdev_genwqe, &genwqe_fops);
	cd->cdev_genwqe.owner = THIS_MODULE;

	rc = cdev_add(&cd->cdev_genwqe, cd->devnum_genwqe, 1);
	if (rc < 0) {
		dev_err(&pci_dev->dev, "err: cdev_add failed\n");
		goto err_add;
	}

	/*
	 * Finally the device in /dev/... must be created. The rule is
	 * to use card%d_clientname for each created device.
	 */
	cd->dev = device_create_with_groups(cd->class_genwqe,
					    &cd->pci_dev->dev,
					    cd->devnum_genwqe, cd,
					    genwqe_attribute_groups,
					    GENWQE_DEVNAME "%u_card",
					    cd->card_idx);
	if (IS_ERR(cd->dev)) {
		rc = PTR_ERR(cd->dev);
		goto err_cdev;
	}

	genwqe_init_debugfs(cd);

	return 0;

 err_cdev:
	cdev_del(&cd->cdev_genwqe);
 err_add:
	unregister_chrdev_region(cd->devnum_genwqe, GENWQE_MAX_MINOR);
 err_dev:
	cd->dev = NULL;
	return rc;
}

static int genwqe_inform_and_stop_processes(struct genwqe_dev *cd)
{
	int rc;
	unsigned int i;
	struct pci_dev *pci_dev = cd->pci_dev;

	if (!genwqe_open_files(cd))
		return 0;

	dev_warn(&pci_dev->dev, "[%s] send SIGIO and wait ...\n", __func__);

	rc = genwqe_kill_fasync(cd, SIGIO);
	if (rc > 0) {
		/* give kill_timeout seconds to close file descriptors ... */
		for (i = 0; (i < GENWQE_KILL_TIMEOUT) &&
			     genwqe_open_files(cd); i++) {
			dev_info(&pci_dev->dev, "  %d sec ...", i);

			cond_resched();
			msleep(1000);
		}

		/* if no open files we can safely continue, else ... */
		if (!genwqe_open_files(cd))
			return 0;

		dev_warn(&pci_dev->dev,
			 "[%s] send SIGKILL and wait ...\n", __func__);

		rc = genwqe_terminate(cd);
		if (rc) {
			/* Give kill_timeout more seconds to end processes */
			for (i = 0; (i < GENWQE_KILL_TIMEOUT) &&
				     genwqe_open_files(cd); i++) {
				dev_warn(&pci_dev->dev, "  %d sec ...", i);

				cond_resched();
				msleep(1000);
			}
		}
	}
	return 0;
}

/**
 * genwqe_device_remove() - Remove genwqe's char device
 * @cd: GenWQE device information
 *
 * This function must be called after the client devices are removed
 * because it will free the major/minor number range for the genwqe
 * drivers.
 *
 * This function must be robust enough to be called twice.
 */
int genwqe_device_remove(struct genwqe_dev *cd)
{
	int rc;
	struct pci_dev *pci_dev = cd->pci_dev;

	if (!genwqe_device_initialized(cd))
		return 1;

	genwqe_inform_and_stop_processes(cd);

	/*
	 * We currently do wait until all file descriptors are
	 * closed. This leads to a problem when we abort the
	 * application which will decrease this reference from
	 * 1/unused to 0/illegal and not from 2/used 1/empty.
	 */
	rc = kref_read(&cd->cdev_genwqe.kobj.kref);
	if (rc != 1) {
		dev_err(&pci_dev->dev,
			"[%s] err: cdev_genwqe...refcount=%d\n", __func__, rc);
		panic("Fatal err: cannot free resources with pending references!");
	}

	genqwe_exit_debugfs(cd);
	device_destroy(cd->class_genwqe, cd->devnum_genwqe);
	cdev_del(&cd->cdev_genwqe);
	unregister_chrdev_region(cd->devnum_genwqe, GENWQE_MAX_MINOR);
	cd->dev = NULL;

	return 0;
}