// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2012-2023, Intel Corporation. All rights reserved.
 * Intel Management Engine Interface (Intel MEI) Linux driver
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/scatterlist.h>
#include <linux/mei_cl_bus.h>

#include "mei_dev.h"
#include "client.h"

#define to_mei_cl_driver(d) container_of_const(d, struct mei_cl_driver, driver)

/**
 * __mei_cl_send - internal client send (write)
 *
 * @cl: host client
 * @buf: buffer to send
 * @length: buffer length
 * @vtag: virtual tag
 * @mode: sending mode
 *
 * Return: written size in bytes or < 0 on error
 */
ssize_t __mei_cl_send(struct mei_cl *cl, const u8 *buf, size_t length, u8 vtag,
		      unsigned int mode)
{
	return __mei_cl_send_timeout(cl, buf, length, vtag, mode, MAX_SCHEDULE_TIMEOUT);
}

/**
 * __mei_cl_send_timeout - internal client send (write)
 *
 * @cl: host client
 * @buf: buffer to send
 * @length: buffer length
 * @vtag: virtual tag
 * @mode: sending mode
 * @timeout: send timeout in milliseconds.
 *           effective only for blocking writes: when the MEI_CL_IO_TX_BLOCKING mode bit is set.
 *           set timeout to MAX_SCHEDULE_TIMEOUT for the maximum allowed wait.
 *
 * Return: written size in bytes or < 0 on error
 */
ssize_t __mei_cl_send_timeout(struct mei_cl *cl, const u8 *buf, size_t length, u8 vtag,
			      unsigned int mode, unsigned long timeout)
{
	struct mei_device *bus;
	struct mei_cl_cb *cb;
	ssize_t rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	bus = cl->dev;

	mutex_lock(&bus->device_lock);
	if (bus->dev_state != MEI_DEV_ENABLED &&
	    bus->dev_state != MEI_DEV_POWERING_DOWN) {
		rets = -ENODEV;
		goto out;
	}

	if (!mei_cl_is_connected(cl)) {
		rets = -ENODEV;
		goto out;
	}

	/* Check if we have an ME client device */
	if (!mei_me_cl_is_active(cl->me_cl)) {
		rets = -ENOTTY;
		goto out;
	}

	if (vtag) {
		/* Check if vtag is supported by client */
		rets = mei_cl_vt_support_check(cl);
		if (rets)
			goto out;
	}

	if (length > mei_cl_mtu(cl)) {
		rets = -EFBIG;
		goto out;
	}

	while (cl->tx_cb_queued >= bus->tx_queue_limit) {
		mutex_unlock(&bus->device_lock);
		rets = wait_event_interruptible(cl->tx_wait,
				cl->writing_state == MEI_WRITE_COMPLETE ||
				(!mei_cl_is_connected(cl)));
		mutex_lock(&bus->device_lock);
		if (rets) {
			if (signal_pending(current))
				rets = -EINTR;
			goto out;
		}
		if (!mei_cl_is_connected(cl)) {
			rets = -ENODEV;
			goto out;
		}
	}

	cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, NULL);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}
	cb->vtag = vtag;

	cb->internal = !!(mode & MEI_CL_IO_TX_INTERNAL);
	cb->blocking = !!(mode & MEI_CL_IO_TX_BLOCKING);
	memcpy(cb->buf.data, buf, length);
	/* hack we point data to header */
	if (mode & MEI_CL_IO_SGL) {
		cb->ext_hdr = (struct mei_ext_hdr *)cb->buf.data;
		cb->buf.data = NULL;
		cb->buf.size = 0;
	}

	rets = mei_cl_write(cl, cb, timeout);

	if (mode & MEI_CL_IO_SGL && rets == 0)
		rets = length;

out:
	mutex_unlock(&bus->device_lock);

	return rets;
}

/**
 * __mei_cl_recv - internal client receive (read)
 *
 * @cl: host client
 * @buf: buffer to receive
 * @length: buffer length
 * @mode: io mode
 * @vtag: virtual tag
 * @timeout: recv timeout, 0 for infinite timeout
 *
 * Return: read size in bytes or < 0 on error
 */
ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, u8 *vtag,
		      unsigned int mode, unsigned long timeout)
{
	struct mei_device *bus;
	struct mei_cl_cb *cb;
	size_t r_length;
	ssize_t rets;
	bool nonblock = !!(mode & MEI_CL_IO_RX_NONBLOCK);

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	bus = cl->dev;

	mutex_lock(&bus->device_lock);
	if (bus->dev_state != MEI_DEV_ENABLED &&
	    bus->dev_state != MEI_DEV_POWERING_DOWN) {
		rets = -ENODEV;
		goto out;
	}

	cb = mei_cl_read_cb(cl, NULL);
	if (cb)
		goto copy;

	rets = mei_cl_read_start(cl, length, NULL);
	if (rets && rets != -EBUSY)
		goto out;

	if (nonblock) {
		rets = -EAGAIN;
		goto out;
	}

	/* wait on event only if there is no other waiter */
	/* synchronized under device mutex */
	if (!waitqueue_active(&cl->rx_wait)) {

		mutex_unlock(&bus->device_lock);

		if (timeout) {
			rets = wait_event_interruptible_timeout
					(cl->rx_wait,
					mei_cl_read_cb(cl, NULL) ||
					(!mei_cl_is_connected(cl)),
					msecs_to_jiffies(timeout));
			if (rets == 0)
				return -ETIME;
			if (rets < 0) {
				if (signal_pending(current))
					return -EINTR;
				return -ERESTARTSYS;
			}
		} else {
			if (wait_event_interruptible
					(cl->rx_wait,
					mei_cl_read_cb(cl, NULL) ||
					(!mei_cl_is_connected(cl)))) {
				if (signal_pending(current))
					return -EINTR;
				return -ERESTARTSYS;
			}
		}

		mutex_lock(&bus->device_lock);

		if (!mei_cl_is_connected(cl)) {
			rets = -ENODEV;
			goto out;
		}
	}

	cb = mei_cl_read_cb(cl, NULL);
	if (!cb) {
		rets = 0;
		goto out;
	}

copy:
	if (cb->status) {
		rets = cb->status;
		goto free;
	}

	/* for the GSC type - copy the extended header to the buffer */
	if (cb->ext_hdr && cb->ext_hdr->type == MEI_EXT_HDR_GSC) {
		r_length = min_t(size_t, length, cb->ext_hdr->length * sizeof(u32));
		memcpy(buf, cb->ext_hdr, r_length);
	} else {
		r_length = min_t(size_t, length, cb->buf_idx);
		memcpy(buf, cb->buf.data, r_length);
	}
	rets = r_length;

	if (vtag)
		*vtag = cb->vtag;

free:
	mei_cl_del_rd_completed(cl, cb);
out:
	mutex_unlock(&bus->device_lock);

	return rets;
}

/**
 * mei_cldev_send_vtag - me device send with vtag (write)
 *
 * @cldev: me client device
 * @buf: buffer to send
 * @length: buffer length
 * @vtag: virtual tag
 *
 * Return:
 *  * written size in bytes
 *  * < 0 on error
 */

ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, const u8 *buf,
			    size_t length, u8 vtag)
{
	struct mei_cl *cl = cldev->cl;

	return __mei_cl_send(cl, buf, length, vtag, MEI_CL_IO_TX_BLOCKING);
}
EXPORT_SYMBOL_GPL(mei_cldev_send_vtag);

/**
 * mei_cldev_send_vtag_timeout - me device send with vtag and timeout (write)
 *
 * @cldev: me client device
 * @buf: buffer to send
 * @length: buffer length
 * @vtag: virtual tag
 * @timeout: send timeout in milliseconds, 0 for infinite timeout
 *
 * Return:
 *  * written size in bytes
 *  * < 0 on error
 */

ssize_t mei_cldev_send_vtag_timeout(struct mei_cl_device *cldev, const u8 *buf,
				    size_t length, u8 vtag, unsigned long timeout)
{
	struct mei_cl *cl = cldev->cl;

	return __mei_cl_send_timeout(cl, buf, length, vtag, MEI_CL_IO_TX_BLOCKING, timeout);
}
EXPORT_SYMBOL_GPL(mei_cldev_send_vtag_timeout);

/**
 * mei_cldev_recv_vtag - client receive with vtag (read)
 *
 * @cldev: me client device
 * @buf: buffer to receive
 * @length: buffer length
 * @vtag: virtual tag
 *
 * Return:
 * * read size in bytes
 * * < 0 on error
 */

ssize_t mei_cldev_recv_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length,
			    u8 *vtag)
{
	struct mei_cl *cl = cldev->cl;

	return __mei_cl_recv(cl, buf, length, vtag, 0, 0);
}
EXPORT_SYMBOL_GPL(mei_cldev_recv_vtag);

/**
 * mei_cldev_recv_nonblock_vtag - non-blocking client receive with vtag (read)
 *
 * @cldev: me client device
 * @buf: buffer to receive
 * @length: buffer length
 * @vtag: virtual tag
 *
 * Return:
 * * read size in bytes
 * * -EAGAIN if the function would block.
 * * < 0 on other error
 */
ssize_t mei_cldev_recv_nonblock_vtag(struct mei_cl_device *cldev, u8 *buf,
				     size_t length, u8 *vtag)
{
	struct mei_cl *cl = cldev->cl;

	return __mei_cl_recv(cl, buf, length, vtag, MEI_CL_IO_RX_NONBLOCK, 0);
}
EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock_vtag);

/**
 * mei_cldev_recv_timeout - client receive with timeout (read)
 *
 * @cldev: me client device
 * @buf: buffer to receive
 * @length: buffer length
 * @timeout: recv timeout in milliseconds, 0 for infinite timeout
 *
 * Return:
 * * read size in bytes
 * * < 0 on error
 */
ssize_t mei_cldev_recv_timeout(struct mei_cl_device *cldev, u8 *buf, size_t length,
			       unsigned long timeout)
{
	return mei_cldev_recv_vtag_timeout(cldev, buf, length, NULL, timeout);
}
EXPORT_SYMBOL_GPL(mei_cldev_recv_timeout);

/**
 * mei_cldev_recv_vtag_timeout - client receive with vtag and timeout (read)
 *
 * @cldev: me client device
 * @buf: buffer to receive
 * @length: buffer length
 * @vtag: virtual tag
 * @timeout: recv timeout in milliseconds, 0 for infinite timeout
 *
 * Return:
 * * read size in bytes
 * * < 0 on error
 */

ssize_t mei_cldev_recv_vtag_timeout(struct mei_cl_device *cldev, u8 *buf, size_t length,
				    u8 *vtag, unsigned long timeout)
{
	struct mei_cl *cl = cldev->cl;

	return __mei_cl_recv(cl, buf, length, vtag, 0, timeout);
}
EXPORT_SYMBOL_GPL(mei_cldev_recv_vtag_timeout);

/**
 * mei_cldev_send - me device send (write)
 *
 * @cldev: me client device
 * @buf: buffer to send
 * @length: buffer length
 *
 * Return:
 *  * written size in bytes
 *  * < 0 on error
 */
ssize_t mei_cldev_send(struct mei_cl_device *cldev, const u8 *buf, size_t length)
{
	return mei_cldev_send_vtag(cldev, buf, length, 0);
}
EXPORT_SYMBOL_GPL(mei_cldev_send);

/**
 * mei_cldev_send_timeout - me device send with timeout (write)
 *
 * @cldev: me client device
 * @buf: buffer to send
 * @length: buffer length
 * @timeout: send timeout in milliseconds, 0 for infinite timeout
 *
 * Return:
 *  * written size in bytes
 *  * < 0 on error
 */
ssize_t mei_cldev_send_timeout(struct mei_cl_device *cldev, const u8 *buf, size_t length,
			       unsigned long timeout)
{
	return mei_cldev_send_vtag_timeout(cldev, buf, length, 0, timeout);
}
EXPORT_SYMBOL_GPL(mei_cldev_send_timeout);

/**
 * mei_cldev_recv - client receive (read)
 *
 * @cldev: me client device
 * @buf: buffer to receive
 * @length: buffer length
 *
 * Return: read size in bytes or < 0 on error
 */
ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length)
{
	return mei_cldev_recv_vtag(cldev, buf, length, NULL);
}
EXPORT_SYMBOL_GPL(mei_cldev_recv);

/**
 * mei_cldev_recv_nonblock - non-blocking client receive (read)
 *
 * @cldev: me client device
 * @buf: buffer to receive
 * @length: buffer length
 *
 * Return: read size in bytes or < 0 on error
 *         -EAGAIN if the function would block.
 */
ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf,
				size_t length)
{
	return mei_cldev_recv_nonblock_vtag(cldev, buf, length, NULL);
}
EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock);
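
/*
 * Usage sketch (illustrative only, not compiled here): a bus client driver
 * polling for data with the non-blocking receive helper above; "cldev", the
 * buffer size and the retry policy are assumptions.
 *
 *	u8 buf[128];
 *	ssize_t ret;
 *
 *	ret = mei_cldev_recv_nonblock(cldev, buf, sizeof(buf));
 *	if (ret == -EAGAIN)
 *		return 0;	// nothing queued yet, try again later
 *	if (ret < 0)
 *		return ret;	// receive failed
 *	// ret bytes of payload are now in buf
 */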

/**
 * mei_cl_bus_rx_work - dispatch rx event for a bus device
 *
 * @work: work
 */
static void mei_cl_bus_rx_work(struct work_struct *work)
{
	struct mei_cl_device *cldev;
	struct mei_device *bus;

	cldev = container_of(work, struct mei_cl_device, rx_work);

	bus = cldev->bus;

	if (cldev->rx_cb)
		cldev->rx_cb(cldev);

	mutex_lock(&bus->device_lock);
	if (mei_cl_is_connected(cldev->cl))
		mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
	mutex_unlock(&bus->device_lock);
}

/**
 * mei_cl_bus_notif_work - dispatch FW notif event for a bus device
 *
 * @work: work
 */
static void mei_cl_bus_notif_work(struct work_struct *work)
{
	struct mei_cl_device *cldev;

	cldev = container_of(work, struct mei_cl_device, notif_work);

	if (cldev->notif_cb)
		cldev->notif_cb(cldev);
}

/**
 * mei_cl_bus_notify_event - schedule notify cb on bus client
 *
 * @cl: host client
 *
 * Return: true if event was scheduled
 *         false if the client is not waiting for event
 */
bool mei_cl_bus_notify_event(struct mei_cl *cl)
{
	struct mei_cl_device *cldev = cl->cldev;

	if (!cldev || !cldev->notif_cb)
		return false;

	if (!cl->notify_ev)
		return false;

	schedule_work(&cldev->notif_work);

	cl->notify_ev = false;

	return true;
}

/**
 * mei_cl_bus_rx_event - schedule rx event
 *
 * @cl: host client
 *
 * Return: true if event was scheduled
 *         false if the client is not waiting for event
 */
bool mei_cl_bus_rx_event(struct mei_cl *cl)
{
	struct mei_cl_device *cldev = cl->cldev;

	if (!cldev || !cldev->rx_cb)
		return false;

	schedule_work(&cldev->rx_work);

	return true;
}

/**
 * mei_cldev_register_rx_cb - register Rx event callback
 *
 * @cldev: me client device
 * @rx_cb: callback function
 *
 * Return: 0 on success
 *         -EALREADY if a callback is already registered
 *         <0 on other errors
 */
int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb)
{
	struct mei_device *bus = cldev->bus;
	int ret;

	if (!rx_cb)
		return -EINVAL;
	if (cldev->rx_cb)
		return -EALREADY;

	cldev->rx_cb = rx_cb;
	INIT_WORK(&cldev->rx_work, mei_cl_bus_rx_work);

	mutex_lock(&bus->device_lock);
	if (mei_cl_is_connected(cldev->cl))
		ret = mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
	else
		ret = -ENODEV;
	mutex_unlock(&bus->device_lock);
	if (ret && ret != -EBUSY) {
		cancel_work_sync(&cldev->rx_work);
		cldev->rx_cb = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mei_cldev_register_rx_cb);
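
/*
 * Usage sketch (illustrative only): registering an Rx callback from a bus
 * client driver's probe path. "my_rx_cb" and the error handling around it
 * are assumptions, not part of this file.
 *
 *	static void my_rx_cb(struct mei_cl_device *cldev)
 *	{
 *		u8 buf[64];
 *		ssize_t ret;
 *
 *		// runs from the bus rx work item, so a blocking read is fine
 *		ret = mei_cldev_recv(cldev, buf, sizeof(buf));
 *		if (ret < 0)
 *			dev_err(&cldev->dev, "rx failed: %zd\n", ret);
 *	}
 *
 *	ret = mei_cldev_register_rx_cb(cldev, my_rx_cb);
 *	if (ret)
 *		goto err_disable;
 */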

/**
 * mei_cldev_register_notif_cb - register FW notification event callback
 *
 * @cldev: me client device
 * @notif_cb: callback function
 *
 * Return: 0 on success
 *         -EALREADY if a callback is already registered
 *         <0 on other errors
 */
int mei_cldev_register_notif_cb(struct mei_cl_device *cldev,
				mei_cldev_cb_t notif_cb)
{
	struct mei_device *bus = cldev->bus;
	int ret;

	if (!notif_cb)
		return -EINVAL;

	if (cldev->notif_cb)
		return -EALREADY;

	cldev->notif_cb = notif_cb;
	INIT_WORK(&cldev->notif_work, mei_cl_bus_notif_work);

	mutex_lock(&bus->device_lock);
	ret = mei_cl_notify_request(cldev->cl, NULL, 1);
	mutex_unlock(&bus->device_lock);
	if (ret) {
		cancel_work_sync(&cldev->notif_work);
		cldev->notif_cb = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mei_cldev_register_notif_cb);

/**
 * mei_cldev_get_drvdata - driver data getter
 *
 * @cldev: mei client device
 *
 * Return: driver private data
 */
void *mei_cldev_get_drvdata(const struct mei_cl_device *cldev)
{
	return dev_get_drvdata(&cldev->dev);
}
EXPORT_SYMBOL_GPL(mei_cldev_get_drvdata);

/**
 * mei_cldev_set_drvdata - driver data setter
 *
 * @cldev: mei client device
 * @data: data to store
 */
void mei_cldev_set_drvdata(struct mei_cl_device *cldev, void *data)
{
	dev_set_drvdata(&cldev->dev, data);
}
EXPORT_SYMBOL_GPL(mei_cldev_set_drvdata);

/**
 * mei_cldev_uuid - return uuid of the underlying me client
 *
 * @cldev: mei client device
 *
 * Return: me client uuid
 */
const uuid_le *mei_cldev_uuid(const struct mei_cl_device *cldev)
{
	return mei_me_cl_uuid(cldev->me_cl);
}
EXPORT_SYMBOL_GPL(mei_cldev_uuid);

/**
 * mei_cldev_ver - return protocol version of the underlying me client
 *
 * @cldev: mei client device
 *
 * Return: me client protocol version
 */
u8 mei_cldev_ver(const struct mei_cl_device *cldev)
{
	return mei_me_cl_ver(cldev->me_cl);
}
EXPORT_SYMBOL_GPL(mei_cldev_ver);

/**
 * mei_cldev_enabled - check whether the device is enabled
 *
 * @cldev: mei client device
 *
 * Return: true if me client is initialized and connected
 */
bool mei_cldev_enabled(const struct mei_cl_device *cldev)
{
	return mei_cl_is_connected(cldev->cl);
}
EXPORT_SYMBOL_GPL(mei_cldev_enabled);

/**
 * mei_cl_bus_module_get - acquire module of the underlying
 *    hw driver.
 *
 * @cldev: mei client device
 *
 * Return: true on success; false if the module was removed.
 */
static bool mei_cl_bus_module_get(struct mei_cl_device *cldev)
{
	return try_module_get(cldev->bus->dev->driver->owner);
}

/**
 * mei_cl_bus_module_put - release the underlying hw module.
 *
 * @cldev: mei client device
 */
static void mei_cl_bus_module_put(struct mei_cl_device *cldev)
{
	module_put(cldev->bus->dev->driver->owner);
}

/**
 * mei_cl_bus_vtag - get bus vtag entry wrapper
 *     The vtag for the bus client is always first.
 *
 * @cl: host client
 *
 * Return: bus vtag or NULL
 */
static inline struct mei_cl_vtag *mei_cl_bus_vtag(struct mei_cl *cl)
{
	return list_first_entry_or_null(&cl->vtag_map,
					struct mei_cl_vtag, list);
}

/**
 * mei_cl_bus_vtag_alloc - add bus client entry to vtag map
 *
 * @cldev: me client device
 *
 * Return:
 * * 0 on success
 * * -ENOMEM if memory allocation failed
 */
static int mei_cl_bus_vtag_alloc(struct mei_cl_device *cldev)
{
	struct mei_cl *cl = cldev->cl;
	struct mei_cl_vtag *cl_vtag;

	/*
	 * Bail out if the client does not support vtags
	 * or has already allocated one
	 */
	if (mei_cl_vt_support_check(cl) || mei_cl_bus_vtag(cl))
		return 0;

	cl_vtag = mei_cl_vtag_alloc(NULL, 0);
	if (IS_ERR(cl_vtag))
		return -ENOMEM;

	list_add_tail(&cl_vtag->list, &cl->vtag_map);

	return 0;
}

/**
 * mei_cl_bus_vtag_free - remove the bus entry from vtag map
 *
 * @cldev: me client device
 */
static void mei_cl_bus_vtag_free(struct mei_cl_device *cldev)
{
	struct mei_cl *cl = cldev->cl;
	struct mei_cl_vtag *cl_vtag;

	cl_vtag = mei_cl_bus_vtag(cl);
	if (!cl_vtag)
		return;

	list_del(&cl_vtag->list);
	kfree(cl_vtag);
}

void *mei_cldev_dma_map(struct mei_cl_device *cldev, u8 buffer_id, size_t size)
{
	struct mei_device *bus;
	struct mei_cl *cl;
	int ret;

	if (!cldev || !buffer_id || !size)
		return ERR_PTR(-EINVAL);

	if (!IS_ALIGNED(size, MEI_FW_PAGE_SIZE)) {
		dev_err(&cldev->dev, "Map size should be aligned to %lu\n",
			MEI_FW_PAGE_SIZE);
		return ERR_PTR(-EINVAL);
	}

	cl = cldev->cl;
	bus = cldev->bus;

	mutex_lock(&bus->device_lock);
	if (cl->state == MEI_FILE_UNINITIALIZED) {
		ret = mei_cl_link(cl);
		if (ret)
			goto notlinked;
		/* update pointers */
		cl->cldev = cldev;
	}

	ret = mei_cl_dma_alloc_and_map(cl, NULL, buffer_id, size);
	if (ret)
		mei_cl_unlink(cl);
notlinked:
	mutex_unlock(&bus->device_lock);
	if (ret)
		return ERR_PTR(ret);
	return cl->dma.vaddr;
}
EXPORT_SYMBOL_GPL(mei_cldev_dma_map);

int mei_cldev_dma_unmap(struct mei_cl_device *cldev)
{
	struct mei_device *bus;
	struct mei_cl *cl;
	int ret;

	if (!cldev)
		return -EINVAL;

	cl = cldev->cl;
	bus = cldev->bus;

	mutex_lock(&bus->device_lock);
	ret = mei_cl_dma_unmap(cl, NULL);

	mei_cl_flush_queues(cl, NULL);
	mei_cl_unlink(cl);
	mutex_unlock(&bus->device_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(mei_cldev_dma_unmap);
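
/*
 * Usage sketch (illustrative only): setting up a client DMA buffer. The
 * buffer id (0x1) and size are assumptions; per the check above, the size
 * must be a multiple of MEI_FW_PAGE_SIZE or mei_cldev_dma_map() fails with
 * -EINVAL.
 *
 *	void *vaddr;
 *
 *	vaddr = mei_cldev_dma_map(cldev, 0x1, 4 * MEI_FW_PAGE_SIZE);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	// ... use the buffer ...
 *	mei_cldev_dma_unmap(cldev);
 */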

/**
 * mei_cldev_enable - enable me client device
 *     create a connection with the me client
 *
 * @cldev: me client device
 *
 * Return: 0 on success and < 0 on error
 */
int mei_cldev_enable(struct mei_cl_device *cldev)
{
	struct mei_device *bus = cldev->bus;
	struct mei_cl *cl;
	int ret;

	cl = cldev->cl;

	mutex_lock(&bus->device_lock);
	if (cl->state == MEI_FILE_UNINITIALIZED) {
		ret = mei_cl_link(cl);
		if (ret)
			goto notlinked;
		/* update pointers */
		cl->cldev = cldev;
	}

	if (mei_cl_is_connected(cl)) {
		ret = 0;
		goto out;
	}

	if (!mei_me_cl_is_active(cldev->me_cl)) {
		dev_err(&cldev->dev, "me client is not active\n");
		ret = -ENOTTY;
		goto out;
	}

	ret = mei_cl_bus_vtag_alloc(cldev);
	if (ret)
		goto out;

	ret = mei_cl_connect(cl, cldev->me_cl, NULL);
	if (ret < 0) {
		dev_err(&cldev->dev, "cannot connect\n");
		mei_cl_bus_vtag_free(cldev);
	}

out:
	if (ret)
		mei_cl_unlink(cl);
notlinked:
	mutex_unlock(&bus->device_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(mei_cldev_enable);
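
/*
 * Usage sketch (illustrative only): the typical enable/send/receive/disable
 * flow of a bus client driver. The request contents, buffer sizes and the
 * 2000 ms receive timeout are assumptions.
 *
 *	u8 req[] = { 0x01, 0x00 };
 *	u8 resp[64];
 *	ssize_t ret;
 *
 *	ret = mei_cldev_enable(cldev);
 *	if (ret)
 *		return ret;
 *
 *	ret = mei_cldev_send(cldev, req, sizeof(req));
 *	if (ret < 0)
 *		goto out;
 *
 *	// wait up to 2 seconds for the firmware reply
 *	ret = mei_cldev_recv_timeout(cldev, resp, sizeof(resp), 2000);
 * out:
 *	mei_cldev_disable(cldev);
 *	return ret < 0 ? ret : 0;
 */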

/**
 * mei_cldev_unregister_callbacks - internal wrapper for unregistering
 *  callbacks.
 *
 * @cldev: client device
 */
static void mei_cldev_unregister_callbacks(struct mei_cl_device *cldev)
{
	if (cldev->rx_cb) {
		cancel_work_sync(&cldev->rx_work);
		cldev->rx_cb = NULL;
	}

	if (cldev->notif_cb) {
		cancel_work_sync(&cldev->notif_work);
		cldev->notif_cb = NULL;
	}
}

/**
 * mei_cldev_disable - disable me client device
 *     disconnect from the me client
 *
 * @cldev: me client device
 *
 * Return: 0 on success and < 0 on error
 */
int mei_cldev_disable(struct mei_cl_device *cldev)
{
	struct mei_device *bus;
	struct mei_cl *cl;
	int err;

	if (!cldev)
		return -ENODEV;

	cl = cldev->cl;

	bus = cldev->bus;

	mei_cldev_unregister_callbacks(cldev);

	mutex_lock(&bus->device_lock);

	mei_cl_bus_vtag_free(cldev);

	if (!mei_cl_is_connected(cl)) {
		dev_dbg(bus->dev, "Already disconnected\n");
		err = 0;
		goto out;
	}

	err = mei_cl_disconnect(cl);
	if (err < 0)
		dev_err(bus->dev, "Could not disconnect from the ME client\n");

out:
	/* Flush queues and remove any pending read unless we have mapped DMA */
	if (!cl->dma_mapped) {
		mei_cl_flush_queues(cl, NULL);
		mei_cl_unlink(cl);
	}

	mutex_unlock(&bus->device_lock);
	return err;
}
EXPORT_SYMBOL_GPL(mei_cldev_disable);

/**
 * mei_cldev_send_gsc_command - sends a gsc command, by sending
 * a gsc mei message to gsc and receiving reply from gsc
 *
 * @cldev: me client device
 * @client_id: client id to send the command to
 * @fence_id: fence id to send the command to
 * @sg_in: scatter gather list containing addresses for rx message buffer
 * @total_in_len: total length of data in 'in' sg, can be less than the sum of the buffer sizes
 * @sg_out: scatter gather list containing addresses for tx message buffer
 *
 * Return:
 *  * written size in bytes
 *  * < 0 on error
 */
ssize_t mei_cldev_send_gsc_command(struct mei_cl_device *cldev,
				   u8 client_id, u32 fence_id,
				   struct scatterlist *sg_in,
				   size_t total_in_len,
				   struct scatterlist *sg_out)
{
	struct mei_cl *cl;
	struct mei_device *bus;
	ssize_t ret = 0;

	struct mei_ext_hdr_gsc_h2f *ext_hdr;
	size_t buf_sz = sizeof(struct mei_ext_hdr_gsc_h2f);
	int sg_out_nents, sg_in_nents;
	int i;
	struct scatterlist *sg;
	struct mei_ext_hdr_gsc_f2h rx_msg;
	unsigned int sg_len;

	if (!cldev || !sg_in || !sg_out)
		return -EINVAL;

	cl = cldev->cl;
	bus = cldev->bus;

	dev_dbg(bus->dev, "client_id %u, fence_id %u\n", client_id, fence_id);

	if (!bus->hbm_f_gsc_supported)
		return -EOPNOTSUPP;

	sg_out_nents = sg_nents(sg_out);
	sg_in_nents = sg_nents(sg_in);
	/* at least one entry in tx and rx sgls must be present */
	if (sg_out_nents <= 0 || sg_in_nents <= 0)
		return -EINVAL;

	buf_sz += (sg_out_nents + sg_in_nents) * sizeof(struct mei_gsc_sgl);
	ext_hdr = kzalloc(buf_sz, GFP_KERNEL);
	if (!ext_hdr)
		return -ENOMEM;

	/* construct the GSC message */
	ext_hdr->hdr.type = MEI_EXT_HDR_GSC;
	ext_hdr->hdr.length = buf_sz / sizeof(u32); /* length is in dw */

	ext_hdr->client_id = client_id;
	ext_hdr->addr_type = GSC_ADDRESS_TYPE_PHYSICAL_SGL;
	ext_hdr->fence_id = fence_id;
	ext_hdr->input_address_count = sg_in_nents;
	ext_hdr->output_address_count = sg_out_nents;
	ext_hdr->reserved[0] = 0;
	ext_hdr->reserved[1] = 0;

	/* copy in-sgl to the message */
	for (i = 0, sg = sg_in; i < sg_in_nents; i++, sg++) {
		ext_hdr->sgl[i].low = lower_32_bits(sg_dma_address(sg));
		ext_hdr->sgl[i].high = upper_32_bits(sg_dma_address(sg));
		sg_len = min_t(unsigned int, sg_dma_len(sg), PAGE_SIZE);
		ext_hdr->sgl[i].length = (sg_len <= total_in_len) ? sg_len : total_in_len;
		total_in_len -= ext_hdr->sgl[i].length;
	}

	/* copy out-sgl to the message */
	for (i = sg_in_nents, sg = sg_out; i < sg_in_nents + sg_out_nents; i++, sg++) {
		ext_hdr->sgl[i].low = lower_32_bits(sg_dma_address(sg));
		ext_hdr->sgl[i].high = upper_32_bits(sg_dma_address(sg));
		sg_len = min_t(unsigned int, sg_dma_len(sg), PAGE_SIZE);
		ext_hdr->sgl[i].length = sg_len;
	}

	/* send the message to GSC */
	ret = __mei_cl_send(cl, (u8 *)ext_hdr, buf_sz, 0, MEI_CL_IO_SGL);
	if (ret < 0) {
		dev_err(bus->dev, "__mei_cl_send failed, returned %zd\n", ret);
		goto end;
	}
	if (ret != buf_sz) {
		dev_err(bus->dev, "__mei_cl_send returned %zd instead of expected %zd\n",
			ret, buf_sz);
		ret = -EIO;
		goto end;
	}

	/* receive the reply from GSC, note that at this point sg_in should contain the reply */
	ret = __mei_cl_recv(cl, (u8 *)&rx_msg, sizeof(rx_msg), NULL, MEI_CL_IO_SGL, 0);

	if (ret != sizeof(rx_msg)) {
		dev_err(bus->dev, "__mei_cl_recv returned %zd instead of expected %zd\n",
			ret, sizeof(rx_msg));
		if (ret >= 0)
			ret = -EIO;
		goto end;
	}

	/* check rx_msg.client_id and rx_msg.fence_id match the ones we sent */
	if (rx_msg.client_id != client_id || rx_msg.fence_id != fence_id) {
		dev_err(bus->dev, "received client_id/fence_id  %u/%u  instead of %u/%u sent\n",
			rx_msg.client_id, rx_msg.fence_id, client_id, fence_id);
		ret = -EFAULT;
		goto end;
	}

	dev_dbg(bus->dev, "gsc command: successfully written %u bytes\n",  rx_msg.written);
	ret = rx_msg.written;

end:
	kfree(ext_hdr);
	return ret;
}
EXPORT_SYMBOL_GPL(mei_cldev_send_gsc_command);

/**
 * mei_cl_device_find - find matching entry in the driver id table
 *
 * @cldev: me client device
 * @cldrv: me client driver
 *
 * Return: id on success; NULL if no id matches
 */
static const
struct mei_cl_device_id *mei_cl_device_find(const struct mei_cl_device *cldev,
					    const struct mei_cl_driver *cldrv)
{
	const struct mei_cl_device_id *id;
	const uuid_le *uuid;
	u8 version;
	bool match;

	uuid = mei_me_cl_uuid(cldev->me_cl);
	version = mei_me_cl_ver(cldev->me_cl);

	id = cldrv->id_table;
	while (uuid_le_cmp(NULL_UUID_LE, id->uuid)) {
		if (!uuid_le_cmp(*uuid, id->uuid)) {
			match = true;

			if (cldev->name[0])
				if (strncmp(cldev->name, id->name,
					    sizeof(id->name)))
					match = false;

			if (id->version != MEI_CL_VERSION_ANY)
				if (id->version != version)
					match = false;
			if (match)
				return id;
		}

		id++;
	}

	return NULL;
}

/**
 * mei_cl_device_match - device match function
 *
 * @dev: device
 * @drv: driver
 *
 * Return: 1 if a matching device was found, 0 otherwise
 */
static int mei_cl_device_match(struct device *dev, const struct device_driver *drv)
{
	const struct mei_cl_device *cldev = to_mei_cl_device(dev);
	const struct mei_cl_driver *cldrv = to_mei_cl_driver(drv);
	const struct mei_cl_device_id *found_id;

	if (!cldev->do_match)
		return 0;

	if (!cldrv || !cldrv->id_table)
		return 0;

	found_id = mei_cl_device_find(cldev, cldrv);
	if (found_id)
		return 1;

	return 0;
}

/**
 * mei_cl_device_probe - bus probe function
 *
 * @dev: device
 *
 * Return: 0 on success; < 0 otherwise
 */
static int mei_cl_device_probe(struct device *dev)
{
	struct mei_cl_device *cldev;
	struct mei_cl_driver *cldrv;
	const struct mei_cl_device_id *id;
	int ret;

	cldev = to_mei_cl_device(dev);
	cldrv = to_mei_cl_driver(dev->driver);

	if (!cldrv || !cldrv->probe)
		return -ENODEV;

	id = mei_cl_device_find(cldev, cldrv);
	if (!id)
		return -ENODEV;

	if (!mei_cl_bus_module_get(cldev)) {
		dev_err(&cldev->dev, "get hw module failed");
		return -ENODEV;
	}

	ret = cldrv->probe(cldev, id);
	if (ret) {
		mei_cl_bus_module_put(cldev);
		return ret;
	}

	__module_get(THIS_MODULE);
	return 0;
}

/**
 * mei_cl_device_remove - remove device from the bus
 *
 * @dev: device
 */
static void mei_cl_device_remove(struct device *dev)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	struct mei_cl_driver *cldrv = to_mei_cl_driver(dev->driver);

	if (cldrv->remove)
		cldrv->remove(cldev);

	mei_cldev_unregister_callbacks(cldev);

	mei_cl_bus_module_put(cldev);
	module_put(THIS_MODULE);
}

static ssize_t name_show(struct device *dev, struct device_attribute *a,
			     char *buf)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);

	return scnprintf(buf, PAGE_SIZE, "%s", cldev->name);
}
static DEVICE_ATTR_RO(name);

static ssize_t uuid_show(struct device *dev, struct device_attribute *a,
			     char *buf)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);

	return sprintf(buf, "%pUl", uuid);
}
static DEVICE_ATTR_RO(uuid);

static ssize_t version_show(struct device *dev, struct device_attribute *a,
			     char *buf)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	u8 version = mei_me_cl_ver(cldev->me_cl);

	return sprintf(buf, "%02X", version);
}
static DEVICE_ATTR_RO(version);

static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
			     char *buf)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
	u8 version = mei_me_cl_ver(cldev->me_cl);

	return scnprintf(buf, PAGE_SIZE, "mei:%s:%pUl:%02X:",
			 cldev->name, uuid, version);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t max_conn_show(struct device *dev, struct device_attribute *a,
			     char *buf)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	u8 maxconn = mei_me_cl_max_conn(cldev->me_cl);

	return sprintf(buf, "%d", maxconn);
}
static DEVICE_ATTR_RO(max_conn);

static ssize_t fixed_show(struct device *dev, struct device_attribute *a,
			  char *buf)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	u8 fixed = mei_me_cl_fixed(cldev->me_cl);

	return sprintf(buf, "%d", fixed);
}
static DEVICE_ATTR_RO(fixed);

static ssize_t vtag_show(struct device *dev, struct device_attribute *a,
			 char *buf)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	bool vt = mei_me_cl_vt(cldev->me_cl);

	return sprintf(buf, "%d", vt);
}
static DEVICE_ATTR_RO(vtag);

static ssize_t max_len_show(struct device *dev, struct device_attribute *a,
			    char *buf)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	u32 maxlen = mei_me_cl_max_len(cldev->me_cl);

	return sprintf(buf, "%u", maxlen);
}
static DEVICE_ATTR_RO(max_len);

static struct attribute *mei_cldev_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_uuid.attr,
	&dev_attr_version.attr,
	&dev_attr_modalias.attr,
	&dev_attr_max_conn.attr,
	&dev_attr_fixed.attr,
	&dev_attr_vtag.attr,
	&dev_attr_max_len.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mei_cldev);

/**
 * mei_cl_device_uevent - me client bus uevent handler
 *
 * @dev: device
 * @env: uevent kobject
 *
 * Return: 0 on success, -ENOMEM when add_uevent_var fails
 */
static int mei_cl_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct mei_cl_device *cldev = to_mei_cl_device(dev);
	const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
	u8 version = mei_me_cl_ver(cldev->me_cl);

	if (add_uevent_var(env, "MEI_CL_VERSION=%d", version))
		return -ENOMEM;

	if (add_uevent_var(env, "MEI_CL_UUID=%pUl", uuid))
		return -ENOMEM;

	if (add_uevent_var(env, "MEI_CL_NAME=%s", cldev->name))
		return -ENOMEM;

	if (add_uevent_var(env, "MODALIAS=mei:%s:%pUl:%02X:",
			   cldev->name, uuid, version))
		return -ENOMEM;

	return 0;
}

static const struct bus_type mei_cl_bus_type = {
	.name		= "mei",
	.dev_groups	= mei_cldev_groups,
	.match		= mei_cl_device_match,
	.probe		= mei_cl_device_probe,
	.remove		= mei_cl_device_remove,
	.uevent		= mei_cl_device_uevent,
};

static struct mei_device *mei_dev_bus_get(struct mei_device *bus)
{
	if (bus)
		get_device(bus->dev);

	return bus;
}

static void mei_dev_bus_put(struct mei_device *bus)
{
	if (bus)
		put_device(bus->dev);
}

static void mei_cl_bus_dev_release(struct device *dev)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);

	mei_cl_flush_queues(cldev->cl, NULL);
	mei_me_cl_put(cldev->me_cl);
	mei_dev_bus_put(cldev->bus);
	kfree(cldev->cl);
	kfree(cldev);
}

static const struct device_type mei_cl_device_type = {
	.release = mei_cl_bus_dev_release,
};

/**
 * mei_cl_bus_set_name - set device name for me client device
 *  <controller>-<client device>
 *  Example: 0000:00:16.0-55213584-9a29-4916-badf-0fb7ed682aeb
 *
 * @cldev: me client device
 */
static inline void mei_cl_bus_set_name(struct mei_cl_device *cldev)
{
	dev_set_name(&cldev->dev, "%s-%pUl",
		     dev_name(cldev->bus->dev),
		     mei_me_cl_uuid(cldev->me_cl));
}

/**
 * mei_cl_bus_dev_alloc - initialize and allocate mei client device
 *
 * @bus: mei device
 * @me_cl: me client
 *
 * Return: allocated device structure or NULL on allocation failure
 */
static struct mei_cl_device *mei_cl_bus_dev_alloc(struct mei_device *bus,
						  struct mei_me_client *me_cl)
{
	struct mei_cl_device *cldev;
	struct mei_cl *cl;

	cldev = kzalloc(sizeof(*cldev), GFP_KERNEL);
	if (!cldev)
		return NULL;

	cl = mei_cl_allocate(bus);
	if (!cl) {
		kfree(cldev);
		return NULL;
	}

	device_initialize(&cldev->dev);
	cldev->dev.parent = bus->dev;
	cldev->dev.bus    = &mei_cl_bus_type;
	cldev->dev.type   = &mei_cl_device_type;
	cldev->bus        = mei_dev_bus_get(bus);
	cldev->me_cl      = mei_me_cl_get(me_cl);
	cldev->cl         = cl;
	mei_cl_bus_set_name(cldev);
	cldev->is_added   = 0;
	INIT_LIST_HEAD(&cldev->bus_list);
	device_enable_async_suspend(&cldev->dev);

	return cldev;
}

/**
 * mei_cl_bus_dev_setup - setup me client device
 *    run fix up routines and set the device name
 *
 * @bus: mei device
 * @cldev: me client device
 *
 * Return: true if the device is eligible for enumeration
 */
static bool mei_cl_bus_dev_setup(struct mei_device *bus,
				 struct mei_cl_device *cldev)
{
	cldev->do_match = 1;
	mei_cl_bus_dev_fixup(cldev);

	/* the device name can change during fix up */
	if (cldev->do_match)
		mei_cl_bus_set_name(cldev);

	return cldev->do_match == 1;
}

/**
 * mei_cl_bus_dev_add - add me client device
 *
 * @cldev: me client device
 *
 * Return: 0 on success; < 0 on failure
 */
static int mei_cl_bus_dev_add(struct mei_cl_device *cldev)
{
	int ret;

	dev_dbg(cldev->bus->dev, "adding %pUL:%02X\n",
		mei_me_cl_uuid(cldev->me_cl),
		mei_me_cl_ver(cldev->me_cl));
	ret = device_add(&cldev->dev);
	if (!ret)
		cldev->is_added = 1;

	return ret;
}

/**
 * mei_cl_bus_dev_stop - stop the driver
 *
 * @cldev: me client device
 */
static void mei_cl_bus_dev_stop(struct mei_cl_device *cldev)
{
	cldev->do_match = 0;
	if (cldev->is_added)
		device_release_driver(&cldev->dev);
}

/**
 * mei_cl_bus_dev_destroy - destroy me client device object
 *
 * @cldev: me client device
 *
 * Locking: called under "dev->cl_bus_lock" lock
 */
static void mei_cl_bus_dev_destroy(struct mei_cl_device *cldev)
{

	WARN_ON(!mutex_is_locked(&cldev->bus->cl_bus_lock));

	if (!cldev->is_added)
		return;

	device_del(&cldev->dev);

	list_del_init(&cldev->bus_list);

	cldev->is_added = 0;
	put_device(&cldev->dev);
}

/**
 * mei_cl_bus_remove_device - remove a device from the bus
 *
 * @cldev: me client device
 */
static void mei_cl_bus_remove_device(struct mei_cl_device *cldev)
{
	mei_cl_bus_dev_stop(cldev);
	mei_cl_bus_dev_destroy(cldev);
}

/**
 * mei_cl_bus_remove_devices - remove all devices from the bus
 *
 * @bus: mei device
 */
void mei_cl_bus_remove_devices(struct mei_device *bus)
{
	struct mei_cl_device *cldev, *next;

	mutex_lock(&bus->cl_bus_lock);
	list_for_each_entry_safe(cldev, next, &bus->device_list, bus_list)
		mei_cl_bus_remove_device(cldev);
	mutex_unlock(&bus->cl_bus_lock);
}


/**
 * mei_cl_bus_dev_init - allocate and initialize a mei client device
 *     based on the me client
 *
 * @bus: mei device
 * @me_cl: me client
 *
 * Locking: called under "dev->cl_bus_lock" lock
 */
static void mei_cl_bus_dev_init(struct mei_device *bus,
				struct mei_me_client *me_cl)
{
	struct mei_cl_device *cldev;

	WARN_ON(!mutex_is_locked(&bus->cl_bus_lock));

	dev_dbg(bus->dev, "initializing %pUl", mei_me_cl_uuid(me_cl));

	if (me_cl->bus_added)
		return;

	cldev = mei_cl_bus_dev_alloc(bus, me_cl);
	if (!cldev)
		return;

	me_cl->bus_added = true;
	list_add_tail(&cldev->bus_list, &bus->device_list);

}

/**
 * mei_cl_bus_rescan - scan the me clients list and create
 *    devices for eligible clients
 *
 * @bus: mei device
 */
static void mei_cl_bus_rescan(struct mei_device *bus)
{
	struct mei_cl_device *cldev, *n;
	struct mei_me_client *me_cl;

	mutex_lock(&bus->cl_bus_lock);

	down_read(&bus->me_clients_rwsem);
	list_for_each_entry(me_cl, &bus->me_clients, list)
		mei_cl_bus_dev_init(bus, me_cl);
	up_read(&bus->me_clients_rwsem);

	list_for_each_entry_safe(cldev, n, &bus->device_list, bus_list) {

		if (!mei_me_cl_is_active(cldev->me_cl)) {
			mei_cl_bus_remove_device(cldev);
			continue;
		}

		if (cldev->is_added)
			continue;

		if (mei_cl_bus_dev_setup(bus, cldev))
			mei_cl_bus_dev_add(cldev);
		else {
			list_del_init(&cldev->bus_list);
			put_device(&cldev->dev);
		}
	}
	mutex_unlock(&bus->cl_bus_lock);

	dev_dbg(bus->dev, "rescan end");
}

void mei_cl_bus_rescan_work(struct work_struct *work)
{
	struct mei_device *bus =
		container_of(work, struct mei_device, bus_rescan_work);

	mei_cl_bus_rescan(bus);
}

int __mei_cldev_driver_register(struct mei_cl_driver *cldrv,
				struct module *owner)
{
	int err;

	cldrv->driver.name = cldrv->name;
	cldrv->driver.owner = owner;
	cldrv->driver.bus = &mei_cl_bus_type;

	err = driver_register(&cldrv->driver);
	if (err)
		return err;

	pr_debug("mei: driver [%s] registered\n", cldrv->driver.name);

	return 0;
}
EXPORT_SYMBOL_GPL(__mei_cldev_driver_register);

void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv)
{
	driver_unregister(&cldrv->driver);

	pr_debug("mei: driver [%s] unregistered\n", cldrv->driver.name);
}
EXPORT_SYMBOL_GPL(mei_cldev_driver_unregister);
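
/*
 * Usage sketch (illustrative only): a minimal bus client driver binding to a
 * fixed client UUID. The UUID (reusing the example from the device-name
 * comment above), driver name and probe/remove bodies are assumptions;
 * module_mei_cl_driver() is the usual convenience wrapper around
 * mei_cldev_driver_register()/mei_cldev_driver_unregister().
 *
 *	#define MY_CL_UUID UUID_LE(0x55213584, 0x9a29, 0x4916, \
 *				   0xba, 0xdf, 0x0f, 0xb7, 0xed, 0x68, 0x2a, 0xeb)
 *
 *	static const struct mei_cl_device_id my_id_table[] = {
 *		{ .uuid = MY_CL_UUID, .version = MEI_CL_VERSION_ANY },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(mei, my_id_table);
 *
 *	static struct mei_cl_driver my_cl_driver = {
 *		.id_table = my_id_table,
 *		.name     = "my_mei_client",
 *		.probe    = my_probe,
 *		.remove   = my_remove,
 *	};
 *	module_mei_cl_driver(my_cl_driver);
 */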


int __init mei_cl_bus_init(void)
{
	return bus_register(&mei_cl_bus_type);
}

void __exit mei_cl_bus_exit(void)
{
	bus_unregister(&mei_cl_bus_type);
}