// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) and
 * James Leu (jleu@mindspring.net).
 * Copyright (C) 2001 by various other people who didn't put their name here.
 */

#include <linux/memblock.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <init.h>
#include <irq_kern.h>
#include <irq_user.h>
#include "mconsole_kern.h"
#include <net_kern.h>
#include <net_user.h>

#define DRIVER_NAME "uml-netdev"

static DEFINE_SPINLOCK(opened_lock);
static LIST_HEAD(opened);

/*
 * The drop_skb is used when we can't allocate an skb.  The
 * packet is read into drop_skb in order to get the data off the
 * connection to the host.
 * It is reallocated whenever a maximum packet size is seen which is
 * larger than any seen before.  update_drop_skb is called from
 * eth_configure when a new interface is added.
 */
static DEFINE_SPINLOCK(drop_lock);
static struct sk_buff *drop_skb;
static int drop_max;
static int update_drop_skb(int max)
{
	struct sk_buff *new;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&drop_lock, flags);

	if (max <= drop_max)
		goto out;

	err = -ENOMEM;
	new = dev_alloc_skb(max);
	if (new == NULL)
		goto out;

	skb_put(new, max);

	kfree_skb(drop_skb);
	drop_skb = new;
	drop_max = max;
	err = 0;
out:
	spin_unlock_irqrestore(&drop_lock, flags);

	return err;
}

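/*
 * Receive one packet from the host-side descriptor.  If no skb can be
 * allocated, the data is drained into drop_skb and counted as dropped.
 * Returns the packet length, 0 if nothing was read or the packet was
 * dropped, or a negative error from the transport's read routine.
 */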
static int uml_net_rx(struct net_device *dev)
{
	struct uml_net_private *lp = netdev_priv(dev);
	int pkt_len;
	struct sk_buff *skb;

	/* If we can't allocate memory, try again next round. */
	skb = dev_alloc_skb(lp->max_packet);
	if (skb == NULL) {
		drop_skb->dev = dev;
		/* Read a packet into drop_skb and don't do anything with it. */
		(*lp->read)(lp->fd, drop_skb, lp);
		dev->stats.rx_dropped++;
		return 0;
	}

	skb->dev = dev;
	skb_put(skb, lp->max_packet);
	skb_reset_mac_header(skb);
	pkt_len = (*lp->read)(lp->fd, skb, lp);

	if (pkt_len > 0) {
		skb_trim(skb, pkt_len);
		skb->protocol = (*lp->protocol)(skb);

		dev->stats.rx_bytes += skb->len;
		dev->stats.rx_packets++;
		netif_rx(skb);
		return pkt_len;
	}

	kfree_skb(skb);
	return pkt_len;
}

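/*
 * Deferred dev_close(): dev_close() cannot be called from interrupt
 * context, so uml_net_interrupt() schedules this work item instead.
 */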
static void uml_dev_close(struct work_struct *work)
{
	struct uml_net_private *lp =
		container_of(work, struct uml_net_private, work);
	dev_close(lp->dev);
}

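/*
 * Interrupt handler for the host-side descriptor: drain all pending
 * packets and schedule a shutdown if the read side reports an error.
 */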
static irqreturn_t uml_net_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct uml_net_private *lp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return IRQ_NONE;

	spin_lock(&lp->lock);
	while ((err = uml_net_rx(dev)) > 0)
		;
	if (err < 0) {
		printk(KERN_ERR
		       "Device '%s' read returned %d, shutting it down\n",
		       dev->name, err);
		/*
		 * dev_close() can't be called in interrupt context, and it
		 * takes lp->lock again.
		 * dev_close() can be safely called multiple times on the
		 * same device, since it tests for (dev->flags & IFF_UP). So
		 * there's no harm in delaying the device shutdown.
		 * Furthermore, the workqueue will not re-enqueue an already
		 * enqueued work item.
		 */
		schedule_work(&lp->work);
		goto out;
	}
out:
	spin_unlock(&lp->lock);
	return IRQ_HANDLED;
}

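/*
 * ndo_open: connect to the host side via the transport's open routine,
 * claim the read IRQ, and drain anything already queued on the host so
 * that SIGIO notifications start flowing.
 */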
static int uml_net_open(struct net_device *dev)
{
	struct uml_net_private *lp = netdev_priv(dev);
	int err;

	if (lp->fd >= 0) {
		err = -ENXIO;
		goto out;
	}

	lp->fd = (*lp->open)(&lp->user);
	if (lp->fd < 0) {
		err = lp->fd;
		goto out;
	}

	err = um_request_irq(dev->irq, lp->fd, IRQ_READ, uml_net_interrupt,
			     IRQF_SHARED, dev->name, dev);
	if (err < 0) {
		printk(KERN_ERR "uml_net_open: failed to get irq(%d)\n", err);
		err = -ENETUNREACH;
		goto out_close;
	}

	netif_start_queue(dev);

	/* Clear the buffer - it can happen that the host side of the
	 * interface is full when we get here.  In this case, new data is
	 * never queued, SIGIOs never arrive, and the net never works.
	 */
	while ((err = uml_net_rx(dev)) > 0)
		;

	spin_lock(&opened_lock);
	list_add(&lp->list, &opened);
	spin_unlock(&opened_lock);

	return 0;
out_close:
	if (lp->close != NULL)
		(*lp->close)(lp->fd, &lp->user);
	lp->fd = -1;
out:
	return err;
}

static int uml_net_close(struct net_device *dev)
{
	struct uml_net_private *lp = netdev_priv(dev);

	netif_stop_queue(dev);

	um_free_irq(dev->irq, dev);
	if (lp->close != NULL)
		(*lp->close)(lp->fd, &lp->user);
	lp->fd = -1;

	spin_lock(&opened_lock);
	list_del(&lp->list);
	spin_unlock(&opened_lock);

	return 0;
}

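/*
 * ndo_start_xmit: hand the skb to the transport's write routine.  The
 * write either consumes the whole packet, returns 0 (packet dropped),
 * or returns a negative error; the skb is freed in all cases.
 */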
static netdev_tx_t uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct uml_net_private *lp = netdev_priv(dev);
	unsigned long flags;
	int len;

	netif_stop_queue(dev);

	spin_lock_irqsave(&lp->lock, flags);

	len = (*lp->write)(lp->fd, skb, lp);
	skb_tx_timestamp(skb);

	if (len == skb->len) {
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += skb->len;
		netif_trans_update(dev);
		netif_start_queue(dev);

		/* this is normally done in the interrupt when tx finishes */
		netif_wake_queue(dev);
	} else if (len == 0) {
		netif_start_queue(dev);
		dev->stats.tx_dropped++;
	} else {
		netif_start_queue(dev);
		printk(KERN_ERR "uml_net_start_xmit: failed(%d)\n", len);
	}

	spin_unlock_irqrestore(&lp->lock, flags);

	dev_consume_skb_any(skb);

	return NETDEV_TX_OK;
}

static void uml_net_set_multicast_list(struct net_device *dev)
{
}

static void uml_net_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	netif_trans_update(dev);
	netif_wake_queue(dev);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void uml_net_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	uml_net_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static void uml_net_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	strscpy(info->driver, DRIVER_NAME);
}

static const struct ethtool_ops uml_net_ethtool_ops = {
	.get_drvinfo	= uml_net_get_drvinfo,
	.get_link	= ethtool_op_get_link,
	.get_ts_info	= ethtool_op_get_ts_info,
};

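/*
 * Parse a colon- or comma-separated MAC address from the command line and
 * assign it to the device, falling back to a random locally administered
 * address if the string is absent, malformed, multicast, or invalid.
 */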
void uml_net_setup_etheraddr(struct net_device *dev, char *str)
{
	u8 addr[ETH_ALEN];
	char *end;
	int i;

	if (str == NULL)
		goto random;

	for (i = 0; i < ETH_ALEN; i++) {
		addr[i] = simple_strtoul(str, &end, 16);
		if ((end == str) ||
		   ((*end != ':') && (*end != ',') && (*end != '\0'))) {
			printk(KERN_ERR
			       "setup_etheraddr: failed to parse '%s' "
			       "as an ethernet address\n", str);
			goto random;
		}
		str = end + 1;
	}
	if (is_multicast_ether_addr(addr)) {
		printk(KERN_ERR
		       "Attempt to assign a multicast ethernet address to a "
		       "device disallowed\n");
		goto random;
	}
	if (!is_valid_ether_addr(addr)) {
		printk(KERN_ERR
		       "Attempt to assign an invalid ethernet address to a "
		       "device disallowed\n");
		goto random;
	}
	if (!is_local_ether_addr(addr)) {
		printk(KERN_WARNING
		       "Warning: Assigning a globally valid ethernet "
		       "address to a device\n");
		printk(KERN_WARNING "You should set the 2nd rightmost bit in "
		       "the first byte of the MAC,\n");
		printk(KERN_WARNING "i.e. %02x:%02x:%02x:%02x:%02x:%02x\n",
		       addr[0] | 0x02, addr[1], addr[2], addr[3], addr[4],
		       addr[5]);
	}
	eth_hw_addr_set(dev, addr);
	return;

random:
	printk(KERN_INFO
	       "Choosing a random ethernet address for device %s\n", dev->name);
	eth_hw_addr_random(dev);
}

static DEFINE_SPINLOCK(devices_lock);
static LIST_HEAD(devices);

static struct platform_driver uml_net_driver = {
	.driver = {
		.name  = DRIVER_NAME,
	},
};

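/*
 * Release callback for the per-device platform device: runs once the
 * last reference is dropped and frees everything eth_configure()
 * allocated for this interface.
 */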
static void net_device_release(struct device *dev)
{
	struct uml_net *device = dev_get_drvdata(dev);
	struct net_device *netdev = device->dev;
	struct uml_net_private *lp = netdev_priv(netdev);

	if (lp->remove != NULL)
		(*lp->remove)(&lp->user);
	list_del(&device->list);
	kfree(device);
	free_netdev(netdev);
}

static const struct net_device_ops uml_netdev_ops = {
	.ndo_open 		= uml_net_open,
	.ndo_stop 		= uml_net_close,
	.ndo_start_xmit 	= uml_net_start_xmit,
	.ndo_set_rx_mode	= uml_net_set_multicast_list,
	.ndo_tx_timeout 	= uml_net_tx_timeout,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = uml_net_poll_controller,
#endif
};

/*
 * Ensures that platform_driver_register is called only once by
 * eth_configure.  Will be set in an initcall.
 */
static int driver_registered;

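/*
 * Build and register one interface: allocate the uml_net bookkeeping
 * structure and the net_device, register a matching platform device,
 * let the transport fill in its private data, then register the netdev.
 */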
static void eth_configure(int n, void *init, char *mac,
			  struct transport *transport, gfp_t gfp_mask)
{
	struct uml_net *device;
	struct net_device *dev;
	struct uml_net_private *lp;
	int err, size;

	size = transport->private_size + sizeof(struct uml_net_private);

	device = kzalloc(sizeof(*device), gfp_mask);
	if (device == NULL) {
		printk(KERN_ERR "eth_configure failed to allocate struct "
		       "uml_net\n");
		return;
	}

	dev = alloc_etherdev(size);
	if (dev == NULL) {
		printk(KERN_ERR "eth_configure: failed to allocate struct "
		       "net_device for eth%d\n", n);
		goto out_free_device;
	}

	INIT_LIST_HEAD(&device->list);
	device->index = n;

	/* If this name ends up conflicting with an existing registered
	 * netdevice, that is OK, register_netdev{,ice}() will notice this
	 * and fail.
	 */
	snprintf(dev->name, sizeof(dev->name), "eth%d", n);

	uml_net_setup_etheraddr(dev, mac);

	printk(KERN_INFO "Netdevice %d (%pM) : ", n, dev->dev_addr);

	lp = netdev_priv(dev);
	/* lp is followed by the transport's private data; it should already
	 * be clear, but zero the whole area explicitly now so the transports
	 * can rely on it. */
	memset(lp, 0, size);
	INIT_WORK(&lp->work, uml_dev_close);

	/* sysfs register */
	if (!driver_registered) {
		platform_driver_register(&uml_net_driver);
		driver_registered = 1;
	}
	device->pdev.id = n;
	device->pdev.name = DRIVER_NAME;
	device->pdev.dev.release = net_device_release;
	dev_set_drvdata(&device->pdev.dev, device);
	if (platform_device_register(&device->pdev))
		goto out_free_netdev;
	SET_NETDEV_DEV(dev, &device->pdev.dev);

	device->dev = dev;

	/*
	 * These just fill in a data structure, so there's no failure
	 * to be worried about.
	 */
	(*transport->kern->init)(dev, init);

	*lp = ((struct uml_net_private)
		{ .list  		= LIST_HEAD_INIT(lp->list),
		  .dev 			= dev,
		  .fd 			= -1,
		  .mac 			= { 0xfe, 0xfd, 0x0, 0x0, 0x0, 0x0},
		  .max_packet		= transport->user->max_packet,
		  .protocol 		= transport->kern->protocol,
		  .open 		= transport->user->open,
		  .close 		= transport->user->close,
		  .remove 		= transport->user->remove,
		  .read 		= transport->kern->read,
		  .write 		= transport->kern->write,
		  .add_address 		= transport->user->add_address,
		  .delete_address  	= transport->user->delete_address });

	spin_lock_init(&lp->lock);
	memcpy(lp->mac, dev->dev_addr, sizeof(lp->mac));

	if ((transport->user->init != NULL) &&
	    ((*transport->user->init)(&lp->user, dev) != 0))
		goto out_unregister;

	dev->mtu = transport->user->mtu;
	dev->netdev_ops = &uml_netdev_ops;
	dev->ethtool_ops = &uml_net_ethtool_ops;
	dev->watchdog_timeo = (HZ >> 1);
	dev->irq = UM_ETH_IRQ;

	err = update_drop_skb(lp->max_packet);
	if (err)
		goto out_undo_user_init;

	rtnl_lock();
	err = register_netdevice(dev);
	rtnl_unlock();
	if (err)
		goto out_undo_user_init;

	spin_lock(&devices_lock);
	list_add(&device->list, &devices);
	spin_unlock(&devices_lock);

	return;

out_undo_user_init:
	if (transport->user->remove != NULL)
		(*transport->user->remove)(&lp->user);
out_unregister:
	platform_device_unregister(&device->pdev);
	return; /* platform_device_unregister frees dev and device */
out_free_netdev:
	free_netdev(dev);
out_free_device:
	kfree(device);
}

static struct uml_net *find_device(int n)
{
	struct uml_net *device;
	struct list_head *ele;

	spin_lock(&devices_lock);
	list_for_each(ele, &devices) {
		device = list_entry(ele, struct uml_net, list);
		if (device->index == n)
			goto out;
	}
	device = NULL;
out:
	spin_unlock(&devices_lock);
	return device;
}

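/*
 * Parse the leading "<n>=" of a device specification, rejecting indices
 * that are already configured.  On success, *index_out gets the device
 * number and *str_out points at the transport part of the string.
 */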
static int eth_parse(char *str, int *index_out, char **str_out,
		     char **error_out)
{
	char *end;
	int n, err = -EINVAL;

	n = simple_strtoul(str, &end, 0);
	if (end == str) {
		*error_out = "Bad device number";
		return err;
	}

	str = end;
	if (*str != '=') {
		*error_out = "Expected '=' after device number";
		return err;
	}

	str++;
	if (find_device(n)) {
		*error_out = "Device already configured";
		return err;
	}

	*index_out = n;
	*str_out = str;
	return 0;
}

struct eth_init {
	struct list_head list;
	char *init;
	int index;
};

static DEFINE_SPINLOCK(transports_lock);
static LIST_HEAD(transports);

/* Filled in during early boot */
static LIST_HEAD(eth_cmd_line);

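/*
 * Match a device specification against one transport by name prefix.
 * Returns 0 if the transport doesn't match; returns 1 if it does, with
 * *init_out pointing at the parsed setup data (or NULL if parsing or
 * allocation failed).
 */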
static int check_transport(struct transport *transport, char *eth, int n,
			   void **init_out, char **mac_out, gfp_t gfp_mask)
{
	int len;

	len = strlen(transport->name);
	if (strncmp(eth, transport->name, len))
		return 0;

	eth += len;
	if (*eth == ',')
		eth++;
	else if (*eth != '\0')
		return 0;

	*init_out = kmalloc(transport->setup_size, gfp_mask);
	if (*init_out == NULL)
		return 1;

	if (!transport->setup(eth, mac_out, *init_out)) {
		kfree(*init_out);
		*init_out = NULL;
	}
	return 1;
}

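/*
 * Add a transport to the global list and configure any command-line
 * devices that were waiting for it to show up.
 */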
void register_transport(struct transport *new)
{
	struct list_head *ele, *next;
	struct eth_init *eth;
	void *init;
	char *mac = NULL;
	int match;

	spin_lock(&transports_lock);
	BUG_ON(!list_empty(&new->list));
	list_add(&new->list, &transports);
	spin_unlock(&transports_lock);

	list_for_each_safe(ele, next, &eth_cmd_line) {
		eth = list_entry(ele, struct eth_init, list);
		match = check_transport(new, eth->init, eth->index, &init,
					&mac, GFP_KERNEL);
		if (!match)
			continue;
		if (init != NULL) {
			eth_configure(eth->index, init, mac, new, GFP_KERNEL);
			kfree(init);
		}
		list_del(&eth->list);
	}
}

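/*
 * Try the device specification against every registered transport;
 * returns nonzero if one of them claimed it.
 */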
static int eth_setup_common(char *str, int index)
{
	struct list_head *ele;
	struct transport *transport;
	void *init;
	char *mac = NULL;
	int found = 0;

	spin_lock(&transports_lock);
	list_for_each(ele, &transports) {
		transport = list_entry(ele, struct transport, list);
		if (!check_transport(transport, str, index, &init,
				     &mac, GFP_ATOMIC))
			continue;
		if (init != NULL) {
			eth_configure(index, init, mac, transport, GFP_ATOMIC);
			kfree(init);
		}
		found = 1;
		break;
	}

	spin_unlock(&transports_lock);
	return found;
}

static int __init eth_setup(char *str)
{
	struct eth_init *new;
	char *error;
	int n, err;

	err = eth_parse(str, &n, &str, &error);
	if (err) {
		printk(KERN_ERR "eth_setup - Couldn't parse '%s': %s\n",
		       str, error);
		return 1;
	}

	new = memblock_alloc(sizeof(*new), SMP_CACHE_BYTES);
	if (!new)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      sizeof(*new));

	INIT_LIST_HEAD(&new->list);
	new->index = n;
	new->init = str;

	list_add_tail(&new->list, &eth_cmd_line);
	return 1;
}

__setup("eth", eth_setup);
__uml_help(eth_setup,
"eth[0-9]+=<transport>,<options>\n"
"    Configure a network device.\n\n"
);

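/*
 * mconsole "config eth<n>=..." handler: parse the specification and hand
 * it to the transports.  The string is duplicated because the underlying
 * driver keeps pointers into it.
 */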
static int net_config(char *str, char **error_out)
{
	int n, err;

	err = eth_parse(str, &n, &str, error_out);
	if (err)
		return err;

	/* This string is broken up and the pieces are used by the underlying
	 * driver, so it is freed only if eth_setup_common() fails.
	 */
	str = kstrdup(str, GFP_KERNEL);
	if (str == NULL) {
		*error_out = "net_config failed to strdup string";
		return -ENOMEM;
	}
	err = !eth_setup_common(str, n);
	if (err)
		kfree(str);
	return err;
}

static int net_id(char **str, int *start_out, int *end_out)
{
	char *end;
	int n;

	n = simple_strtoul(*str, &end, 0);
	if ((*end != '\0') || (end == *str))
		return -1;

	*start_out = n;
	*end_out = n;
	*str = end;
	return n;
}

static int net_remove(int n, char **error_out)
{
	struct uml_net *device;
	struct net_device *dev;
	struct uml_net_private *lp;

	device = find_device(n);
	if (device == NULL)
		return -ENODEV;

	dev = device->dev;
	lp = netdev_priv(dev);
	if (lp->fd > 0)
		return -EBUSY;
	unregister_netdev(dev);
	platform_device_unregister(&device->pdev);

	return 0;
}

static struct mc_device net_mc = {
	.list		= LIST_HEAD_INIT(net_mc.list),
	.name		= "eth",
	.config		= net_config,
	.get_config	= NULL,
	.id		= net_id,
	.remove		= net_remove,
};

#ifdef CONFIG_INET
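/*
 * inetaddr notifier: tell the transport when an IPv4 address is added to
 * or removed from one of our devices, so it can adjust its host-side
 * state accordingly.
 */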
static int uml_inetaddr_event(struct notifier_block *this, unsigned long event,
			      void *ptr)
{
	struct in_ifaddr *ifa = ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct uml_net_private *lp;
	void (*proc)(unsigned char *, unsigned char *, void *);
	unsigned char addr_buf[4], netmask_buf[4];

	if (dev->netdev_ops->ndo_open != uml_net_open)
		return NOTIFY_DONE;

	lp = netdev_priv(dev);

	proc = NULL;
	switch (event) {
	case NETDEV_UP:
		proc = lp->add_address;
		break;
	case NETDEV_DOWN:
		proc = lp->delete_address;
		break;
	}
	if (proc != NULL) {
		memcpy(addr_buf, &ifa->ifa_address, sizeof(addr_buf));
		memcpy(netmask_buf, &ifa->ifa_mask, sizeof(netmask_buf));
		(*proc)(addr_buf, netmask_buf, &lp->user);
	}
	return NOTIFY_DONE;
}

/* uml_net_init shouldn't be called twice on two CPUs at the same time */
static struct notifier_block uml_inetaddr_notifier = {
	.notifier_call		= uml_inetaddr_event,
};

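/*
 * Register the inetaddr notifier, then replay NETDEV_UP events for any
 * interfaces that were opened before it was installed.
 */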
static void inet_register(void)
{
	struct list_head *ele;
	struct uml_net_private *lp;
	struct in_device *ip;
	struct in_ifaddr *in;

	register_inetaddr_notifier(&uml_inetaddr_notifier);

	/* Devices may have been opened already, so the uml_inetaddr_notifier
	 * didn't get a chance to run for them.  This fakes it so that
	 * addresses which have already been set up get handled properly.
	 */
	spin_lock(&opened_lock);
	list_for_each(ele, &opened) {
		lp = list_entry(ele, struct uml_net_private, list);
		ip = lp->dev->ip_ptr;
		if (ip == NULL)
			continue;
		in = ip->ifa_list;
		while (in != NULL) {
			uml_inetaddr_event(NULL, NETDEV_UP, in);
			in = in->ifa_next;
		}
	}
	spin_unlock(&opened_lock);
}
#else
static inline void inet_register(void)
{
}
#endif

static int uml_net_init(void)
{
	mconsole_register_dev(&net_mc);
	inet_register();
	return 0;
}

__initcall(uml_net_init);

static void close_devices(void)
{
	struct list_head *ele;
	struct uml_net_private *lp;

	spin_lock(&opened_lock);
	list_for_each(ele, &opened) {
		lp = list_entry(ele, struct uml_net_private, list);
		um_free_irq(lp->dev->irq, lp->dev);
		if ((lp->close != NULL) && (lp->fd >= 0))
			(*lp->close)(lp->fd, &lp->user);
		if (lp->remove != NULL)
			(*lp->remove)(&lp->user);
	}
	spin_unlock(&opened_lock);
}

__uml_exitcall(close_devices);

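/*
 * Walk the IPv4 addresses configured on a device, invoking cb on each
 * address/netmask pair in turn.
 */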
void iter_addresses(void *d,
		    void (*cb)(unsigned char *, unsigned char *, void *),
		    void *arg)
{
	struct net_device *dev = d;
	struct in_device *ip = dev->ip_ptr;
	struct in_ifaddr *in;
	unsigned char address[4], netmask[4];

	if (ip == NULL)
		return;
	in = ip->ifa_list;
	while (in != NULL) {
		memcpy(address, &in->ifa_address, sizeof(address));
		memcpy(netmask, &in->ifa_mask, sizeof(netmask));
		(*cb)(address, netmask, arg);
		in = in->ifa_next;
	}
}

int dev_netmask(void *d, void *m)
{
	struct net_device *dev = d;
	struct in_device *ip = dev->ip_ptr;
	struct in_ifaddr *in;
	__be32 *mask_out = m;

	if (ip == NULL)
		return 1;

	in = ip->ifa_list;
	if (in == NULL)
		return 1;

	*mask_out = in->ifa_mask;
	return 0;
}

void *get_output_buffer(int *len_out)
{
	void *ret;

	ret = (void *) __get_free_pages(GFP_KERNEL, 0);
	if (ret)
		*len_out = PAGE_SIZE;
	else
		*len_out = 0;
	return ret;
}

void free_output_buffer(void *buffer)
{
	free_pages((unsigned long) buffer, 0);
}

int tap_setup_common(char *str, char *type, char **dev_name, char **mac_out,
		     char **gate_addr)
{
	char *remain;

	remain = split_if_spec(str, dev_name, mac_out, gate_addr, NULL);
	if (remain != NULL) {
		printk(KERN_ERR "tap_setup_common - Extra garbage on "
		       "specification: '%s'\n", remain);
		return 1;
	}

	return 0;
}

unsigned short eth_protocol(struct sk_buff *skb)
{
	return eth_type_trans(skb, skb->dev);
}