// SPDX-License-Identifier: GPL-2.0+
/*
 * VMEbus User access driver
 *
 * Author: Martyn Welch <martyn.welch@ge.com>
 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
 *
 * Based on work by:
 *   Tom Armistead and Ajit Prem
 *     Copyright 2004 Motorola Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/refcount.h>
#include <linux/cdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/types.h>

#include <linux/io.h>
#include <linux/uaccess.h>

#include "vme.h"
#include "vme_user.h"

#define DRIVER_NAME "vme_user"

static int bus[VME_USER_BUS_MAX];
static unsigned int bus_num;

/* Currently Documentation/admin-guide/devices.rst defines the
 * following for VME:
 *
 * 221 char	VME bus
 *		  0 = /dev/bus/vme/m0		First master image
 *		  1 = /dev/bus/vme/m1		Second master image
 *		  2 = /dev/bus/vme/m2		Third master image
 *		  3 = /dev/bus/vme/m3		Fourth master image
 *		  4 = /dev/bus/vme/s0		First slave image
 *		  5 = /dev/bus/vme/s1		Second slave image
 *		  6 = /dev/bus/vme/s2		Third slave image
 *		  7 = /dev/bus/vme/s3		Fourth slave image
 *		  8 = /dev/bus/vme/ctl		Control
 *
 *		It is expected that all VME bus drivers will use the
 *		same interface.  For interface documentation see
 *		http://www.vmelinux.org/.
 *
 * However, the VME driver at http://www.vmelinux.org/ is rather old and
 * doesn't even support the tsi148 chipset (which has 8 master and 8 slave
 * windows). We'll run with this for now as far as possible; however, it
 * probably makes sense to get rid of the old mappings and just do everything
 * dynamically.
 *
 * So for now we'll restrict the driver to providing 4 masters and 4 slaves as
 * defined above and try to support at least some of the interface from
 * http://www.vmelinux.org/; as an alternative, a driver providing a saner
 * interface can be written later.
 *
 * The vmelinux.org driver never supported slave images; the devices reserved
 * for slaves were repurposed to support all 8 master images on the UniverseII!
 * We shall support 4 masters and 4 slaves with this driver.
 */
#define VME_MAJOR	221	/* VME Major Device Number */
#define VME_DEVS	9	/* Number of dev entries */

#define MASTER_MINOR	0
#define MASTER_MAX	3
#define SLAVE_MINOR	4
#define SLAVE_MAX	7
#define CONTROL_MINOR	8

#define PCI_BUF_SIZE  0x20000	/* Size of one slave image buffer */
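
/*
 * A minimal sketch of the expected userspace usage of these nodes, assuming
 * the device files listed above have been created (paths and values are
 * illustrative only):
 *
 *	int fd = open("/dev/bus/vme/m0", O_RDWR);
 *	char data[64];
 *
 *	lseek(fd, 0x1000, SEEK_SET);	// offset within the master window
 *	read(fd, data, sizeof(data));	// performs a VME master read
 */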

/*
 * Structure to handle image-related parameters.
 */
struct image_desc {
	void *kern_buf;	/* Buffer address in kernel space */
	dma_addr_t pci_buf;	/* Buffer address in PCI address space */
	unsigned long long size_buf;	/* Buffer size */
	struct mutex mutex;	/* Mutex for locking image */
	struct device *device;	/* Sysfs device */
	struct vme_resource *resource;	/* VME resource */
	int mmap_count;		/* Number of current mmaps */
};

static struct image_desc image[VME_DEVS];

static struct cdev *vme_user_cdev;		/* Character device */
static struct vme_dev *vme_user_bridge;		/* Pointer to user device */

static const struct class vme_user_sysfs_class = {
	.name = DRIVER_NAME,
};

static const int type[VME_DEVS] = {	MASTER_MINOR,	MASTER_MINOR,
					MASTER_MINOR,	MASTER_MINOR,
					SLAVE_MINOR,	SLAVE_MINOR,
					SLAVE_MINOR,	SLAVE_MINOR,
					CONTROL_MINOR
				};

struct vme_user_vma_priv {
	unsigned int minor;
	refcount_t refcnt;
};
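
/*
 * The read/write helpers below are called from vme_user_read() and
 * vme_user_write() with image[minor].mutex held and with count and *ppos
 * already clamped to the image size; the resource_* variants additionally
 * clamp count to the bounce buffer size.
 */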
static ssize_t resource_to_user(int minor, char __user *buf, size_t count,
				loff_t *ppos)
{
	ssize_t copied = 0;

	if (count > image[minor].size_buf)
		count = image[minor].size_buf;

	copied = vme_master_read(image[minor].resource, image[minor].kern_buf,
				 count, *ppos);
	if (copied < 0)
		return (int)copied;

	if (copy_to_user(buf, image[minor].kern_buf, (unsigned long)copied))
		return -EFAULT;

	return copied;
}

static ssize_t resource_from_user(unsigned int minor, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	if (count > image[minor].size_buf)
		count = image[minor].size_buf;

	if (copy_from_user(image[minor].kern_buf, buf, (unsigned long)count))
		return -EFAULT;

	return vme_master_write(image[minor].resource, image[minor].kern_buf,
				count, *ppos);
}

static ssize_t buffer_to_user(unsigned int minor, char __user *buf,
			      size_t count, loff_t *ppos)
{
	void *image_ptr;

	image_ptr = image[minor].kern_buf + *ppos;
	if (copy_to_user(buf, image_ptr, (unsigned long)count))
		return -EFAULT;

	return count;
}

static ssize_t buffer_from_user(unsigned int minor, const char __user *buf,
				size_t count, loff_t *ppos)
{
	void *image_ptr;

	image_ptr = image[minor].kern_buf + *ppos;
	if (copy_from_user(image_ptr, buf, (unsigned long)count))
		return -EFAULT;

	return count;
}

static ssize_t vme_user_read(struct file *file, char __user *buf, size_t count,
			     loff_t *ppos)
{
	unsigned int minor = iminor(file_inode(file));
	ssize_t retval;
	size_t image_size;

	if (minor == CONTROL_MINOR)
		return 0;

	mutex_lock(&image[minor].mutex);

	/* XXX Do we *really* want this helper - we can use vme_*_get ? */
	image_size = vme_get_size(image[minor].resource);

	/* Ensure we are starting at a valid location */
	if ((*ppos < 0) || (*ppos > (image_size - 1))) {
		mutex_unlock(&image[minor].mutex);
		return 0;
	}

	/* Ensure not reading past end of the image */
	if (*ppos + count > image_size)
		count = image_size - *ppos;

	switch (type[minor]) {
	case MASTER_MINOR:
		retval = resource_to_user(minor, buf, count, ppos);
		break;
	case SLAVE_MINOR:
		retval = buffer_to_user(minor, buf, count, ppos);
		break;
	default:
		retval = -EINVAL;
	}

	mutex_unlock(&image[minor].mutex);
	if (retval > 0)
		*ppos += retval;

	return retval;
}

static ssize_t vme_user_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	unsigned int minor = iminor(file_inode(file));
	ssize_t retval;
	size_t image_size;

	if (minor == CONTROL_MINOR)
		return 0;

	mutex_lock(&image[minor].mutex);

	image_size = vme_get_size(image[minor].resource);

	/* Ensure we are starting at a valid location */
	if ((*ppos < 0) || (*ppos > (image_size - 1))) {
		mutex_unlock(&image[minor].mutex);
		return 0;
	}

	/* Ensure not writing past end of the image */
	if (*ppos + count > image_size)
		count = image_size - *ppos;

	switch (type[minor]) {
	case MASTER_MINOR:
		retval = resource_from_user(minor, buf, count, ppos);
		break;
	case SLAVE_MINOR:
		retval = buffer_from_user(minor, buf, count, ppos);
		break;
	default:
		retval = -EINVAL;
	}

	mutex_unlock(&image[minor].mutex);

	if (retval > 0)
		*ppos += retval;

	return retval;
}

static loff_t vme_user_llseek(struct file *file, loff_t off, int whence)
{
	unsigned int minor = iminor(file_inode(file));
	size_t image_size;
	loff_t res;

	switch (type[minor]) {
	case MASTER_MINOR:
	case SLAVE_MINOR:
		mutex_lock(&image[minor].mutex);
		image_size = vme_get_size(image[minor].resource);
		res = fixed_size_llseek(file, off, whence, image_size);
		mutex_unlock(&image[minor].mutex);
		return res;
	}

	return -EINVAL;
}

/*
 * The ioctls provided by the old VME access method (the one at vmelinux.org)
 * are most certainly wrong, as they effectively push the register layout
 * through to user space. Given that the VME core can handle multiple bridges
 * with different register layouts, this is most certainly not the way to go.
 *
 * We aren't using the structures defined in the Motorola driver either - these
 * are also quite low level. However, we should use the definitions that have
 * already been defined.
 */
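
/*
 * A sketch of the matching userspace side of the master-window ioctls,
 * assuming the struct vme_master definition from vme_user.h (the values
 * are illustrative only):
 *
 *	struct vme_master master = {
 *		.enable   = 1,
 *		.vme_addr = 0x10000,
 *		.size     = 0x10000,
 *		.aspace   = VME_A32,
 *		.cycle    = VME_SCT,
 *		.dwidth   = VME_D32,
 *	};
 *
 *	ioctl(fd, VME_SET_MASTER, &master);	// program the window
 *	ioctl(fd, VME_GET_MASTER, &master);	// read the settings back
 */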
static int vme_user_ioctl(struct inode *inode, struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct vme_master master;
	struct vme_slave slave;
	struct vme_irq_id irq_req;
	unsigned long copied;
	unsigned int minor = iminor(inode);
	int retval;
	dma_addr_t pci_addr;
	void __user *argp = (void __user *)arg;

	switch (type[minor]) {
	case CONTROL_MINOR:
		switch (cmd) {
		case VME_IRQ_GEN:
			copied = copy_from_user(&irq_req, argp,
						sizeof(irq_req));
			if (copied) {
				pr_warn("Partial copy from userspace\n");
				return -EFAULT;
			}

			return vme_irq_generate(vme_user_bridge,
						irq_req.level,
						irq_req.statid);
		}
		break;
	case MASTER_MINOR:
		switch (cmd) {
		case VME_GET_MASTER:
			memset(&master, 0, sizeof(master));

			/* XXX	We do not want to push aspace, cycle and width
			 *	to userspace as they are
			 */
			retval = vme_master_get(image[minor].resource,
						&master.enable,
						&master.vme_addr,
						&master.size, &master.aspace,
						&master.cycle, &master.dwidth);

			copied = copy_to_user(argp, &master,
					      sizeof(master));
			if (copied) {
				pr_warn("Partial copy to userspace\n");
				return -EFAULT;
			}

			return retval;

		case VME_SET_MASTER:

			if (image[minor].mmap_count != 0) {
				pr_warn("Can't adjust mapped window\n");
				return -EPERM;
			}

			copied = copy_from_user(&master, argp, sizeof(master));
			if (copied) {
				pr_warn("Partial copy from userspace\n");
				return -EFAULT;
			}

			/* XXX	We do not want to push aspace, cycle and width
			 *	to userspace as they are
			 */
			return vme_master_set(image[minor].resource,
				master.enable, master.vme_addr, master.size,
				master.aspace, master.cycle, master.dwidth);
		}
		break;
	case SLAVE_MINOR:
		switch (cmd) {
		case VME_GET_SLAVE:
			memset(&slave, 0, sizeof(slave));

			/* XXX	We do not want to push aspace, cycle and width
			 *	to userspace as they are
			 */
			retval = vme_slave_get(image[minor].resource,
					       &slave.enable, &slave.vme_addr,
					       &slave.size, &pci_addr,
					       &slave.aspace, &slave.cycle);

			copied = copy_to_user(argp, &slave,
					      sizeof(slave));
			if (copied) {
				pr_warn("Partial copy to userspace\n");
				return -EFAULT;
			}

			return retval;

		case VME_SET_SLAVE:

			copied = copy_from_user(&slave, argp, sizeof(slave));
			if (copied) {
				pr_warn("Partial copy from userspace\n");
				return -EFAULT;
			}

			/* XXX	We do not want to push aspace, cycle and width
			 *	to userspace as they are
			 */
			return vme_slave_set(image[minor].resource,
				slave.enable, slave.vme_addr, slave.size,
				image[minor].pci_buf, slave.aspace,
				slave.cycle);
		}
		break;
	}

	return -EINVAL;
}

static long
vme_user_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct inode *inode = file_inode(file);
	unsigned int minor = iminor(inode);

	mutex_lock(&image[minor].mutex);
	ret = vme_user_ioctl(inode, file, cmd, arg);
	mutex_unlock(&image[minor].mutex);

	return ret;
}

static void vme_user_vm_open(struct vm_area_struct *vma)
{
	struct vme_user_vma_priv *vma_priv = vma->vm_private_data;

	refcount_inc(&vma_priv->refcnt);
}

static void vme_user_vm_close(struct vm_area_struct *vma)
{
	struct vme_user_vma_priv *vma_priv = vma->vm_private_data;
	unsigned int minor = vma_priv->minor;

	if (!refcount_dec_and_test(&vma_priv->refcnt))
		return;

	mutex_lock(&image[minor].mutex);
	image[minor].mmap_count--;
	mutex_unlock(&image[minor].mutex);

	kfree(vma_priv);
}

static const struct vm_operations_struct vme_user_vm_ops = {
	.open = vme_user_vm_open,
	.close = vme_user_vm_close,
};
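
/*
 * Only master windows may be mmap()ed; while a mapping exists,
 * VME_SET_MASTER refuses to reconfigure the window. A userspace sketch,
 * assuming a master window of at least 0x10000 bytes (illustrative only):
 *
 *	void *p = mmap(NULL, 0x10000, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 */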

static int vme_user_master_mmap(unsigned int minor, struct vm_area_struct *vma)
{
	int err;
	struct vme_user_vma_priv *vma_priv;

	mutex_lock(&image[minor].mutex);

	err = vme_master_mmap(image[minor].resource, vma);
	if (err) {
		mutex_unlock(&image[minor].mutex);
		return err;
	}

	vma_priv = kmalloc(sizeof(*vma_priv), GFP_KERNEL);
	if (!vma_priv) {
		mutex_unlock(&image[minor].mutex);
		return -ENOMEM;
	}

	vma_priv->minor = minor;
	refcount_set(&vma_priv->refcnt, 1);
	vma->vm_ops = &vme_user_vm_ops;
	vma->vm_private_data = vma_priv;

	image[minor].mmap_count++;

	mutex_unlock(&image[minor].mutex);

	return 0;
}

static int vme_user_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned int minor = iminor(file_inode(file));

	if (type[minor] == MASTER_MINOR)
		return vme_user_master_mmap(minor, vma);

	return -ENODEV;
}

static const struct file_operations vme_user_fops = {
	.read = vme_user_read,
	.write = vme_user_write,
	.llseek = vme_user_llseek,
	.unlocked_ioctl = vme_user_unlocked_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.mmap = vme_user_mmap,
};

static int vme_user_match(struct vme_dev *vdev)
{
	int i;

	int cur_bus = vme_bus_num(vdev);
	int cur_slot = vme_slot_num(vdev);

	for (i = 0; i < bus_num; i++)
		if ((cur_bus == bus[i]) && (cur_slot == vdev->num))
			return 1;

	return 0;
}

/*
 * In this simple access driver, the old behaviour is being preserved as much
 * as practical. We will therefore reserve the buffers and request the images
 * here so that we don't have to do it later.
 */
static int vme_user_probe(struct vme_dev *vdev)
{
	int i, err;
	char *name;

	/* Save pointer to the bridge device */
	if (vme_user_bridge) {
		dev_err(&vdev->dev, "Driver can only be loaded for 1 device\n");
		err = -EINVAL;
		goto err_dev;
	}
	vme_user_bridge = vdev;

	/* Initialise descriptors */
	for (i = 0; i < VME_DEVS; i++) {
		image[i].kern_buf = NULL;
		image[i].pci_buf = 0;
		mutex_init(&image[i].mutex);
		image[i].device = NULL;
		image[i].resource = NULL;
	}

	/* Assign major and minor numbers for the driver */
	err = register_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS, DRIVER_NAME);
	if (err) {
		dev_warn(&vdev->dev, "Error getting Major Number %d for driver.\n",
			 VME_MAJOR);
		goto err_region;
	}

	/* Register the driver as a char device */
	vme_user_cdev = cdev_alloc();
	if (!vme_user_cdev) {
		err = -ENOMEM;
		goto err_char;
	}
	vme_user_cdev->ops = &vme_user_fops;
	vme_user_cdev->owner = THIS_MODULE;
	err = cdev_add(vme_user_cdev, MKDEV(VME_MAJOR, 0), VME_DEVS);
	if (err)
		goto err_class;

	/* Request slave resources and allocate buffers (128kB wide) */
	for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) {
		/* XXX Need to properly request attributes */
		/* On the ca91cx42 bridge only two slave windows support A16
		 * addressing, so we request A24, which is supported by all
		 * windows.
		 */
		image[i].resource = vme_slave_request(vme_user_bridge,
						      VME_A24, VME_SCT);
		if (!image[i].resource) {
			dev_warn(&vdev->dev,
				 "Unable to allocate slave resource\n");
			err = -ENOMEM;
			goto err_slave;
		}
		image[i].size_buf = PCI_BUF_SIZE;
		image[i].kern_buf = vme_alloc_consistent(image[i].resource,
							 image[i].size_buf,
							 &image[i].pci_buf);
		if (!image[i].kern_buf) {
			dev_warn(&vdev->dev,
				 "Unable to allocate memory for buffer\n");
			image[i].pci_buf = 0;
			vme_slave_free(image[i].resource);
			err = -ENOMEM;
			goto err_slave;
		}
	}

	/*
	 * Request master resources and allocate buffers for small reads
	 * and writes
	 */
	for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
		/* XXX Need to properly request attributes */
		image[i].resource = vme_master_request(vme_user_bridge,
						       VME_A32, VME_SCT,
						       VME_D32);
		if (!image[i].resource) {
			dev_warn(&vdev->dev,
				 "Unable to allocate master resource\n");
			err = -ENOMEM;
			goto err_master;
		}
		image[i].size_buf = PCI_BUF_SIZE;
		image[i].kern_buf = kmalloc(image[i].size_buf, GFP_KERNEL);
		if (!image[i].kern_buf) {
			err = -ENOMEM;
			vme_master_free(image[i].resource);
			goto err_master;
		}
	}

	/* Create sysfs entries - on udev systems this creates the dev files */
	err = class_register(&vme_user_sysfs_class);
	if (err) {
		dev_err(&vdev->dev, "Error creating vme_user class.\n");
		goto err_master;
	}

	/* Add sysfs entries */
	for (i = 0; i < VME_DEVS; i++) {
		int num;

		switch (type[i]) {
		case MASTER_MINOR:
			name = "bus/vme/m%d";
			break;
		case CONTROL_MINOR:
			name = "bus/vme/ctl";
			break;
		case SLAVE_MINOR:
			name = "bus/vme/s%d";
			break;
		default:
			err = -EINVAL;
			goto err_sysfs;
		}

		num = (type[i] == SLAVE_MINOR) ? i - (MASTER_MAX + 1) : i;
		image[i].device = device_create(&vme_user_sysfs_class, NULL,
						MKDEV(VME_MAJOR, i), NULL,
						name, num);
		if (IS_ERR(image[i].device)) {
			dev_info(&vdev->dev, "Error creating sysfs device\n");
			err = PTR_ERR(image[i].device);
			goto err_sysfs;
		}
	}

	return 0;

err_sysfs:
	while (i > 0) {
		i--;
		device_destroy(&vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
	}
	class_unregister(&vme_user_sysfs_class);

	/* Ensure the counter is set correctly to deallocate all master windows */
	i = MASTER_MAX + 1;
err_master:
	while (i > MASTER_MINOR) {
		i--;
		kfree(image[i].kern_buf);
		vme_master_free(image[i].resource);
	}

	/*
	 * Ensure the counter is set correctly to deallocate all slave windows
	 * and buffers
	 */
	i = SLAVE_MAX + 1;
err_slave:
	while (i > SLAVE_MINOR) {
		i--;
		vme_free_consistent(image[i].resource, image[i].size_buf,
				    image[i].kern_buf, image[i].pci_buf);
		vme_slave_free(image[i].resource);
	}
err_class:
	cdev_del(vme_user_cdev);
err_char:
	unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS);
err_region:
err_dev:
	return err;
}

static void vme_user_remove(struct vme_dev *dev)
{
	int i;

	/* Remove sysfs entries */
	for (i = 0; i < VME_DEVS; i++) {
		mutex_destroy(&image[i].mutex);
		device_destroy(&vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
	}
	class_unregister(&vme_user_sysfs_class);

	for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
		kfree(image[i].kern_buf);
		vme_master_free(image[i].resource);
	}

	for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) {
		vme_slave_set(image[i].resource, 0, 0, 0, 0, VME_A32, 0);
		vme_free_consistent(image[i].resource, image[i].size_buf,
				    image[i].kern_buf, image[i].pci_buf);
		vme_slave_free(image[i].resource);
	}

	/* Unregister device driver */
	cdev_del(vme_user_cdev);

	/* Unregister the major and minor device numbers */
	unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS);
}

static struct vme_driver vme_user_driver = {
	.name = DRIVER_NAME,
	.match = vme_user_match,
	.probe = vme_user_probe,
	.remove = vme_user_remove,
};

static int __init vme_user_init(void)
{
	int retval = 0;

	pr_info("VME User Space Access Driver\n");

	if (bus_num == 0) {
		pr_err("No cards, skipping registration\n");
		retval = -ENODEV;
		goto err_nocard;
	}

	/* Let's start by supporting one bus; we can support more than one
	 * in future revisions if that ever becomes necessary.
	 */
	if (bus_num > VME_USER_BUS_MAX) {
		pr_err("Driver only able to handle %d buses\n",
		       VME_USER_BUS_MAX);
		bus_num = VME_USER_BUS_MAX;
	}

	/*
	 * Here we just register the maximum number of devices we can and
	 * leave vme_user_match() to allow only 1 to go through to probe().
	 * This way, if we later want to allow multiple user access devices,
	 * we just change the code in vme_user_match().
	 */
	retval = vme_register_driver(&vme_user_driver, VME_MAX_SLOTS);
	if (retval)
		goto err_reg;

	return retval;

err_reg:
err_nocard:
	return retval;
}

static void __exit vme_user_exit(void)
{
	vme_unregister_driver(&vme_user_driver);
}

MODULE_PARM_DESC(bus, "Enumeration of VMEbus to which the driver is connected");
module_param_array(bus, int, &bus_num, 0000);
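
/*
 * The "bus" array selects which VME bus numbers this driver binds to, e.g.
 * (an illustrative invocation, not taken from this file):
 *
 *	modprobe vme_user bus=0
 */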

MODULE_DESCRIPTION("VME User Space Access Driver");
MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com>");
MODULE_LICENSE("GPL");

module_init(vme_user_init);
module_exit(vme_user_exit);