// SPDX-License-Identifier: GPL-2.0+
/*
 * Surface Book (gen. 2 and later) detachment system (DTX) driver.
 *
 * Provides a user-space interface to properly handle clipboard/tablet
 * (containing screen and processor) detachment from the base of the device
 * (containing the keyboard and optionally a discrete GPU). Allows user-space
 * to acknowledge (to speed things up), abort (e.g. in case the dGPU is still
 * in use), or request detachment.
 *
 * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
 */
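
/*
 * Illustrative user-space usage: a minimal sketch, not part of this driver.
 * It assumes the UAPI definitions from <linux/surface_aggregator/dtx.h> and
 * the "surface/dtx" misc-device node registered below (typically exposed as
 * /dev/surface/dtx):
 *
 *	int fd = open("/dev/surface/dtx", O_RDWR);
 *
 *	ioctl(fd, SDTX_IOCTL_EVENTS_ENABLE);	// subscribe to events
 *	ioctl(fd, SDTX_IOCTL_LATCH_REQUEST);	// request detachment
 *
 *	struct sdtx_event ev;			// event header; ev.length
 *	read(fd, &ev, sizeof(ev));		// payload bytes follow
 *
 *	ioctl(fd, SDTX_IOCTL_LATCH_CONFIRM);	// acknowledge/confirm
 *	close(fd);
 */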

#include <linux/fs.h>
#include <linux/input.h>
#include <linux/ioctl.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/kref.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include <linux/surface_aggregator/controller.h>
#include <linux/surface_aggregator/device.h>
#include <linux/surface_aggregator/dtx.h>


/* -- SSAM interface. ------------------------------------------------------- */

enum sam_event_cid_bas {
	SAM_EVENT_CID_DTX_CONNECTION			= 0x0c,
	SAM_EVENT_CID_DTX_REQUEST			= 0x0e,
	SAM_EVENT_CID_DTX_CANCEL			= 0x0f,
	SAM_EVENT_CID_DTX_LATCH_STATUS			= 0x11,
};

enum ssam_bas_base_state {
	SSAM_BAS_BASE_STATE_DETACH_SUCCESS		= 0x00,
	SSAM_BAS_BASE_STATE_ATTACHED			= 0x01,
	SSAM_BAS_BASE_STATE_NOT_FEASIBLE		= 0x02,
};

enum ssam_bas_latch_status {
	SSAM_BAS_LATCH_STATUS_CLOSED			= 0x00,
	SSAM_BAS_LATCH_STATUS_OPENED			= 0x01,
	SSAM_BAS_LATCH_STATUS_FAILED_TO_OPEN		= 0x02,
	SSAM_BAS_LATCH_STATUS_FAILED_TO_REMAIN_OPEN	= 0x03,
	SSAM_BAS_LATCH_STATUS_FAILED_TO_CLOSE		= 0x04,
};

enum ssam_bas_cancel_reason {
	SSAM_BAS_CANCEL_REASON_NOT_FEASIBLE		= 0x00,  /* Low battery. */
	SSAM_BAS_CANCEL_REASON_TIMEOUT			= 0x02,
	SSAM_BAS_CANCEL_REASON_FAILED_TO_OPEN		= 0x03,
	SSAM_BAS_CANCEL_REASON_FAILED_TO_REMAIN_OPEN	= 0x04,
	SSAM_BAS_CANCEL_REASON_FAILED_TO_CLOSE		= 0x05,
};

struct ssam_bas_base_info {
	u8 state;
	u8 base_id;
} __packed;

static_assert(sizeof(struct ssam_bas_base_info) == 2);

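/*
 * The SSAM_DEFINE_SYNC_REQUEST_N/R() macros below generate small wrappers
 * (e.g. ssam_bas_latch_lock(ctrl)) issuing synchronous SAM requests with the
 * given target category/ID and command ID. The _N variants carry neither
 * payload nor response; the _R variants return a response of the given type.
 */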
SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_lock, {
	.target_category = SSAM_SSH_TC_BAS,
	.target_id       = SSAM_SSH_TID_SAM,
	.command_id      = 0x06,
	.instance_id     = 0x00,
});

SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_unlock, {
	.target_category = SSAM_SSH_TC_BAS,
	.target_id       = SSAM_SSH_TID_SAM,
	.command_id      = 0x07,
	.instance_id     = 0x00,
});

SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_request, {
	.target_category = SSAM_SSH_TC_BAS,
	.target_id       = SSAM_SSH_TID_SAM,
	.command_id      = 0x08,
	.instance_id     = 0x00,
});

SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_confirm, {
	.target_category = SSAM_SSH_TC_BAS,
	.target_id       = SSAM_SSH_TID_SAM,
	.command_id      = 0x09,
	.instance_id     = 0x00,
});

SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_heartbeat, {
	.target_category = SSAM_SSH_TC_BAS,
	.target_id       = SSAM_SSH_TID_SAM,
	.command_id      = 0x0a,
	.instance_id     = 0x00,
});

SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_cancel, {
	.target_category = SSAM_SSH_TC_BAS,
	.target_id       = SSAM_SSH_TID_SAM,
	.command_id      = 0x0b,
	.instance_id     = 0x00,
});

SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_get_base, struct ssam_bas_base_info, {
	.target_category = SSAM_SSH_TC_BAS,
	.target_id       = SSAM_SSH_TID_SAM,
	.command_id      = 0x0c,
	.instance_id     = 0x00,
});

SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_get_device_mode, u8, {
	.target_category = SSAM_SSH_TC_BAS,
	.target_id       = SSAM_SSH_TID_SAM,
	.command_id      = 0x0d,
	.instance_id     = 0x00,
});

SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_get_latch_status, u8, {
	.target_category = SSAM_SSH_TC_BAS,
	.target_id       = SSAM_SSH_TID_SAM,
	.command_id      = 0x11,
	.instance_id     = 0x00,
});


/* -- Main structures. ------------------------------------------------------ */

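/*
 * Driver-internal state flags: the SHUTDOWN bit marks the device as going
 * away (no new clients, no new requests), while the DIRTY bits mark the
 * cached base/mode/latch state as pending re-validation against the EC (see
 * sdtx_device_state_workfn()).
 */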
enum sdtx_device_state {
	SDTX_DEVICE_SHUTDOWN_BIT    = BIT(0),
	SDTX_DEVICE_DIRTY_BASE_BIT  = BIT(1),
	SDTX_DEVICE_DIRTY_MODE_BIT  = BIT(2),
	SDTX_DEVICE_DIRTY_LATCH_BIT = BIT(3),
};

struct sdtx_device {
	struct kref kref;
	struct rw_semaphore lock;         /* Guards device and controller reference. */

	struct device *dev;
	struct ssam_controller *ctrl;
	unsigned long flags;

	struct miscdevice mdev;
	wait_queue_head_t waitq;
	struct mutex write_lock;          /* Guards order of events/notifications. */
	struct rw_semaphore client_lock;  /* Guards client list.                   */
	struct list_head client_list;

	struct delayed_work state_work;
	struct {
		struct ssam_bas_base_info base;
		u8 device_mode;
		u8 latch_status;
	} state;

	struct delayed_work mode_work;
	struct input_dev *mode_switch;

	struct ssam_event_notifier notif;
};

enum sdtx_client_state {
	SDTX_CLIENT_EVENTS_ENABLED_BIT = BIT(0),
};

struct sdtx_client {
	struct sdtx_device *ddev;
	struct list_head node;
	unsigned long flags;

	struct fasync_struct *fasync;

	struct mutex read_lock;           /* Guards FIFO buffer read access. */
	DECLARE_KFIFO(buffer, u8, 512);
};

static void __sdtx_device_release(struct kref *kref)
{
	struct sdtx_device *ddev = container_of(kref, struct sdtx_device, kref);

	mutex_destroy(&ddev->write_lock);
	kfree(ddev);
}

static struct sdtx_device *sdtx_device_get(struct sdtx_device *ddev)
{
	if (ddev)
		kref_get(&ddev->kref);

	return ddev;
}

static void sdtx_device_put(struct sdtx_device *ddev)
{
	if (ddev)
		kref_put(&ddev->kref, __sdtx_device_release);
}


/* -- Firmware value translations. ------------------------------------------ */

static u16 sdtx_translate_base_state(struct sdtx_device *ddev, u8 state)
{
	switch (state) {
	case SSAM_BAS_BASE_STATE_ATTACHED:
		return SDTX_BASE_ATTACHED;

	case SSAM_BAS_BASE_STATE_DETACH_SUCCESS:
		return SDTX_BASE_DETACHED;

	case SSAM_BAS_BASE_STATE_NOT_FEASIBLE:
		return SDTX_DETACH_NOT_FEASIBLE;

	default:
		dev_err(ddev->dev, "unknown base state: %#04x\n", state);
		return SDTX_UNKNOWN(state);
	}
}

static u16 sdtx_translate_latch_status(struct sdtx_device *ddev, u8 status)
{
	switch (status) {
	case SSAM_BAS_LATCH_STATUS_CLOSED:
		return SDTX_LATCH_CLOSED;

	case SSAM_BAS_LATCH_STATUS_OPENED:
		return SDTX_LATCH_OPENED;

	case SSAM_BAS_LATCH_STATUS_FAILED_TO_OPEN:
		return SDTX_ERR_FAILED_TO_OPEN;

	case SSAM_BAS_LATCH_STATUS_FAILED_TO_REMAIN_OPEN:
		return SDTX_ERR_FAILED_TO_REMAIN_OPEN;

	case SSAM_BAS_LATCH_STATUS_FAILED_TO_CLOSE:
		return SDTX_ERR_FAILED_TO_CLOSE;

	default:
		dev_err(ddev->dev, "unknown latch status: %#04x\n", status);
		return SDTX_UNKNOWN(status);
	}
}

static u16 sdtx_translate_cancel_reason(struct sdtx_device *ddev, u8 reason)
{
	switch (reason) {
	case SSAM_BAS_CANCEL_REASON_NOT_FEASIBLE:
		return SDTX_DETACH_NOT_FEASIBLE;

	case SSAM_BAS_CANCEL_REASON_TIMEOUT:
		return SDTX_DETACH_TIMEDOUT;

	case SSAM_BAS_CANCEL_REASON_FAILED_TO_OPEN:
		return SDTX_ERR_FAILED_TO_OPEN;

	case SSAM_BAS_CANCEL_REASON_FAILED_TO_REMAIN_OPEN:
		return SDTX_ERR_FAILED_TO_REMAIN_OPEN;

	case SSAM_BAS_CANCEL_REASON_FAILED_TO_CLOSE:
		return SDTX_ERR_FAILED_TO_CLOSE;

	default:
		dev_err(ddev->dev, "unknown cancel reason: %#04x\n", reason);
		return SDTX_UNKNOWN(reason);
	}
}


/* -- IOCTLs. --------------------------------------------------------------- */

static int sdtx_ioctl_get_base_info(struct sdtx_device *ddev,
				    struct sdtx_base_info __user *buf)
{
	struct ssam_bas_base_info raw;
	struct sdtx_base_info info;
	int status;

	lockdep_assert_held_read(&ddev->lock);

	status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &raw);
	if (status < 0)
		return status;

	info.state = sdtx_translate_base_state(ddev, raw.state);
	info.base_id = SDTX_BASE_TYPE_SSH(raw.base_id);

	if (copy_to_user(buf, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}

static int sdtx_ioctl_get_device_mode(struct sdtx_device *ddev, u16 __user *buf)
{
	u8 mode;
	int status;

	lockdep_assert_held_read(&ddev->lock);

	status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &mode);
	if (status < 0)
		return status;

	return put_user(mode, buf);
}

static int sdtx_ioctl_get_latch_status(struct sdtx_device *ddev, u16 __user *buf)
{
	u8 latch;
	int status;

	lockdep_assert_held_read(&ddev->lock);

	status = ssam_retry(ssam_bas_get_latch_status, ddev->ctrl, &latch);
	if (status < 0)
		return status;

	return put_user(sdtx_translate_latch_status(ddev, latch), buf);
}

static long __surface_dtx_ioctl(struct sdtx_client *client, unsigned int cmd, unsigned long arg)
{
	struct sdtx_device *ddev = client->ddev;

	lockdep_assert_held_read(&ddev->lock);

	switch (cmd) {
	case SDTX_IOCTL_EVENTS_ENABLE:
		set_bit(SDTX_CLIENT_EVENTS_ENABLED_BIT, &client->flags);
		return 0;

	case SDTX_IOCTL_EVENTS_DISABLE:
		clear_bit(SDTX_CLIENT_EVENTS_ENABLED_BIT, &client->flags);
		return 0;

	case SDTX_IOCTL_LATCH_LOCK:
		return ssam_retry(ssam_bas_latch_lock, ddev->ctrl);

	case SDTX_IOCTL_LATCH_UNLOCK:
		return ssam_retry(ssam_bas_latch_unlock, ddev->ctrl);

	case SDTX_IOCTL_LATCH_REQUEST:
		return ssam_retry(ssam_bas_latch_request, ddev->ctrl);

	case SDTX_IOCTL_LATCH_CONFIRM:
		return ssam_retry(ssam_bas_latch_confirm, ddev->ctrl);

	case SDTX_IOCTL_LATCH_HEARTBEAT:
		return ssam_retry(ssam_bas_latch_heartbeat, ddev->ctrl);

	case SDTX_IOCTL_LATCH_CANCEL:
		return ssam_retry(ssam_bas_latch_cancel, ddev->ctrl);

	case SDTX_IOCTL_GET_BASE_INFO:
		return sdtx_ioctl_get_base_info(ddev, (struct sdtx_base_info __user *)arg);

	case SDTX_IOCTL_GET_DEVICE_MODE:
		return sdtx_ioctl_get_device_mode(ddev, (u16 __user *)arg);

	case SDTX_IOCTL_GET_LATCH_STATUS:
		return sdtx_ioctl_get_latch_status(ddev, (u16 __user *)arg);

	default:
		return -EINVAL;
	}
}

static long surface_dtx_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct sdtx_client *client = file->private_data;
	long status;

	if (down_read_killable(&client->ddev->lock))
		return -ERESTARTSYS;

	if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &client->ddev->flags)) {
		up_read(&client->ddev->lock);
		return -ENODEV;
	}

	status = __surface_dtx_ioctl(client, cmd, arg);

	up_read(&client->ddev->lock);
	return status;
}


/* -- File operations. ------------------------------------------------------ */

static int surface_dtx_open(struct inode *inode, struct file *file)
{
	struct sdtx_device *ddev = container_of(file->private_data, struct sdtx_device, mdev);
	struct sdtx_client *client;

	/* Initialize client. */
	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return -ENOMEM;

	client->ddev = sdtx_device_get(ddev);

	INIT_LIST_HEAD(&client->node);

	mutex_init(&client->read_lock);
	INIT_KFIFO(client->buffer);

	file->private_data = client;

	/* Attach client. */
	down_write(&ddev->client_lock);

	/*
	 * Do not add a new client if the device has been shut down. Note that
	 * it's enough to hold the client_lock here as, during shutdown, we
	 * only acquire that lock and remove clients after marking the device
	 * as shut down.
	 */
	if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) {
		up_write(&ddev->client_lock);
		mutex_destroy(&client->read_lock);
		sdtx_device_put(client->ddev);
		kfree(client);
		return -ENODEV;
	}

	list_add_tail(&client->node, &ddev->client_list);
	up_write(&ddev->client_lock);

	stream_open(inode, file);
	return 0;
}

static int surface_dtx_release(struct inode *inode, struct file *file)
{
	struct sdtx_client *client = file->private_data;

	/* Detach client. */
	down_write(&client->ddev->client_lock);
	list_del(&client->node);
	up_write(&client->ddev->client_lock);

	/* Free client. */
	sdtx_device_put(client->ddev);
	mutex_destroy(&client->read_lock);
	kfree(client);

	return 0;
}

static ssize_t surface_dtx_read(struct file *file, char __user *buf, size_t count, loff_t *offs)
{
	struct sdtx_client *client = file->private_data;
	struct sdtx_device *ddev = client->ddev;
	unsigned int copied;
	int status = 0;

	if (down_read_killable(&ddev->lock))
		return -ERESTARTSYS;

	/* Make sure we're not shut down. */
	if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) {
		up_read(&ddev->lock);
		return -ENODEV;
	}

	do {
		/* Check availability, wait if necessary. */
		if (kfifo_is_empty(&client->buffer)) {
			up_read(&ddev->lock);

			if (file->f_flags & O_NONBLOCK)
				return -EAGAIN;

			status = wait_event_interruptible(ddev->waitq,
							  !kfifo_is_empty(&client->buffer) ||
							  test_bit(SDTX_DEVICE_SHUTDOWN_BIT,
								   &ddev->flags));
			if (status < 0)
				return status;

			if (down_read_killable(&ddev->lock))
				return -ERESTARTSYS;

			/* Need to check that we're not shut down again. */
			if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) {
				up_read(&ddev->lock);
				return -ENODEV;
			}
		}

		/* Try to read from FIFO. */
		if (mutex_lock_interruptible(&client->read_lock)) {
			up_read(&ddev->lock);
			return -ERESTARTSYS;
		}

		status = kfifo_to_user(&client->buffer, buf, count, &copied);
		mutex_unlock(&client->read_lock);

		if (status < 0) {
			up_read(&ddev->lock);
			return status;
		}

		/* We might not have gotten anything, check this here. */
		if (copied == 0 && (file->f_flags & O_NONBLOCK)) {
			up_read(&ddev->lock);
			return -EAGAIN;
		}
	} while (copied == 0);

	up_read(&ddev->lock);
	return copied;
}

static __poll_t surface_dtx_poll(struct file *file, struct poll_table_struct *pt)
{
	struct sdtx_client *client = file->private_data;
	__poll_t events = 0;

	if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &client->ddev->flags))
		return EPOLLHUP | EPOLLERR;

	poll_wait(file, &client->ddev->waitq, pt);

	if (!kfifo_is_empty(&client->buffer))
		events |= EPOLLIN | EPOLLRDNORM;

	return events;
}

static int surface_dtx_fasync(int fd, struct file *file, int on)
{
	struct sdtx_client *client = file->private_data;

	return fasync_helper(fd, file, on, &client->fasync);
}

static const struct file_operations surface_dtx_fops = {
	.owner          = THIS_MODULE,
	.open           = surface_dtx_open,
	.release        = surface_dtx_release,
	.read           = surface_dtx_read,
	.poll           = surface_dtx_poll,
	.fasync         = surface_dtx_fasync,
	.unlocked_ioctl = surface_dtx_ioctl,
	.compat_ioctl   = surface_dtx_ioctl,
};


/* -- Event handling/forwarding. -------------------------------------------- */

/*
 * The device operation mode is not immediately updated on the EC when the
 * base has been connected, i.e. querying the device mode inside the
 * connection event callback yields an outdated value. Thus, we can only
 * determine the new tablet-mode switch and device mode values after some
 * time.
 *
 * These delays have been chosen by experimenting. We first delay on connect
 * events, then check and validate the device mode against the base state and
 * if invalid delay again by the "recheck" delay.
 */
#define SDTX_DEVICE_MODE_DELAY_CONNECT	msecs_to_jiffies(100)
#define SDTX_DEVICE_MODE_DELAY_RECHECK	msecs_to_jiffies(100)

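/*
 * Events as delivered to user-space: a struct sdtx_event header immediately
 * followed by its payload ('length' bytes). The __packed structures below
 * mirror that layout so events can be copied into the client FIFOs in one
 * piece.
 */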
struct sdtx_status_event {
	struct sdtx_event e;
	__u16 v;
} __packed;

struct sdtx_base_info_event {
	struct sdtx_event e;
	struct sdtx_base_info v;
} __packed;

union sdtx_generic_event {
	struct sdtx_event common;
	struct sdtx_status_event status;
	struct sdtx_base_info_event base;
};

static void sdtx_update_device_mode(struct sdtx_device *ddev, unsigned long delay);

/* Must be executed with ddev->write_lock held. */
static void sdtx_push_event(struct sdtx_device *ddev, struct sdtx_event *evt)
{
	const size_t len = sizeof(struct sdtx_event) + evt->length;
	struct sdtx_client *client;

	lockdep_assert_held(&ddev->write_lock);

	down_read(&ddev->client_lock);
	list_for_each_entry(client, &ddev->client_list, node) {
		if (!test_bit(SDTX_CLIENT_EVENTS_ENABLED_BIT, &client->flags))
			continue;

		if (likely(kfifo_avail(&client->buffer) >= len))
			kfifo_in(&client->buffer, (const u8 *)evt, len);
		else
			dev_warn(ddev->dev, "event buffer overrun\n");

		kill_fasync(&client->fasync, SIGIO, POLL_IN);
	}
	up_read(&ddev->client_lock);

	wake_up_interruptible(&ddev->waitq);
}

static u32 sdtx_notifier(struct ssam_event_notifier *nf, const struct ssam_event *in)
{
	struct sdtx_device *ddev = container_of(nf, struct sdtx_device, notif);
	union sdtx_generic_event event;
	size_t len;

	/* Validate event payload length. */
	switch (in->command_id) {
	case SAM_EVENT_CID_DTX_CONNECTION:
		len = 2 * sizeof(u8);
		break;

	case SAM_EVENT_CID_DTX_REQUEST:
		len = 0;
		break;

	case SAM_EVENT_CID_DTX_CANCEL:
		len = sizeof(u8);
		break;

	case SAM_EVENT_CID_DTX_LATCH_STATUS:
		len = sizeof(u8);
		break;

	default:
		return 0;
	}

	if (in->length != len) {
		dev_err(ddev->dev,
			"unexpected payload size for event %#04x: got %u, expected %zu\n",
			in->command_id, in->length, len);
		return 0;
	}

	mutex_lock(&ddev->write_lock);

	/* Translate event. */
	switch (in->command_id) {
	case SAM_EVENT_CID_DTX_CONNECTION:
		clear_bit(SDTX_DEVICE_DIRTY_BASE_BIT, &ddev->flags);

		/* If state has not changed: do not send new event. */
		if (ddev->state.base.state == in->data[0] &&
		    ddev->state.base.base_id == in->data[1])
			goto out;

		ddev->state.base.state = in->data[0];
		ddev->state.base.base_id = in->data[1];

		event.base.e.length = sizeof(struct sdtx_base_info);
		event.base.e.code = SDTX_EVENT_BASE_CONNECTION;
		event.base.v.state = sdtx_translate_base_state(ddev, in->data[0]);
		event.base.v.base_id = SDTX_BASE_TYPE_SSH(in->data[1]);
		break;

	case SAM_EVENT_CID_DTX_REQUEST:
		event.common.code = SDTX_EVENT_REQUEST;
		event.common.length = 0;
		break;

	case SAM_EVENT_CID_DTX_CANCEL:
		event.status.e.length = sizeof(u16);
		event.status.e.code = SDTX_EVENT_CANCEL;
		event.status.v = sdtx_translate_cancel_reason(ddev, in->data[0]);
		break;

	case SAM_EVENT_CID_DTX_LATCH_STATUS:
		clear_bit(SDTX_DEVICE_DIRTY_LATCH_BIT, &ddev->flags);

		/* If state has not changed: do not send new event. */
		if (ddev->state.latch_status == in->data[0])
			goto out;

		ddev->state.latch_status = in->data[0];

		event.status.e.length = sizeof(u16);
		event.status.e.code = SDTX_EVENT_LATCH_STATUS;
		event.status.v = sdtx_translate_latch_status(ddev, in->data[0]);
		break;
	}

	sdtx_push_event(ddev, &event.common);

	/* Update device mode on base connection change. */
	if (in->command_id == SAM_EVENT_CID_DTX_CONNECTION) {
		unsigned long delay;

		delay = in->data[0] ? SDTX_DEVICE_MODE_DELAY_CONNECT : 0;
		sdtx_update_device_mode(ddev, delay);
	}

out:
	mutex_unlock(&ddev->write_lock);
	return SSAM_NOTIF_HANDLED;
}


/* -- State update functions. ----------------------------------------------- */

static bool sdtx_device_mode_invalid(u8 mode, u8 base_state)
{
	return ((base_state == SSAM_BAS_BASE_STATE_ATTACHED) &&
		(mode == SDTX_DEVICE_MODE_TABLET)) ||
	       ((base_state == SSAM_BAS_BASE_STATE_DETACH_SUCCESS) &&
		(mode != SDTX_DEVICE_MODE_TABLET));
}

static void sdtx_device_mode_workfn(struct work_struct *work)
{
	struct sdtx_device *ddev = container_of(work, struct sdtx_device, mode_work.work);
	struct sdtx_status_event event;
	struct ssam_bas_base_info base;
	int status, tablet;
	u8 mode;

	/* Get operation mode. */
	status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &mode);
	if (status) {
		dev_err(ddev->dev, "failed to get device mode: %d\n", status);
		return;
	}

	/* Get base info. */
	status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &base);
	if (status) {
		dev_err(ddev->dev, "failed to get base info: %d\n", status);
		return;
	}

	/*
	 * In some cases (specifically when attaching the base), the device
	 * mode isn't updated right away. Thus we check if the device mode
	 * makes sense for the given base state and try again later if it
	 * doesn't.
	 */
	if (sdtx_device_mode_invalid(mode, base.state)) {
		dev_dbg(ddev->dev, "device mode is invalid, trying again\n");
		sdtx_update_device_mode(ddev, SDTX_DEVICE_MODE_DELAY_RECHECK);
		return;
	}

	mutex_lock(&ddev->write_lock);
	clear_bit(SDTX_DEVICE_DIRTY_MODE_BIT, &ddev->flags);

	/* Avoid sending duplicate device-mode events. */
	if (ddev->state.device_mode == mode) {
		mutex_unlock(&ddev->write_lock);
		return;
	}

	ddev->state.device_mode = mode;

	event.e.length = sizeof(u16);
	event.e.code = SDTX_EVENT_DEVICE_MODE;
	event.v = mode;

	sdtx_push_event(ddev, &event.e);

	/* Send SW_TABLET_MODE event. */
	tablet = mode != SDTX_DEVICE_MODE_LAPTOP;
	input_report_switch(ddev->mode_switch, SW_TABLET_MODE, tablet);
	input_sync(ddev->mode_switch);

	mutex_unlock(&ddev->write_lock);
}

static void sdtx_update_device_mode(struct sdtx_device *ddev, unsigned long delay)
{
	schedule_delayed_work(&ddev->mode_work, delay);
}

/* Must be executed with ddev->write_lock held. */
static void __sdtx_device_state_update_base(struct sdtx_device *ddev,
					    struct ssam_bas_base_info info)
{
	struct sdtx_base_info_event event;

	lockdep_assert_held(&ddev->write_lock);

	/* Prevent duplicate events. */
	if (ddev->state.base.state == info.state &&
	    ddev->state.base.base_id == info.base_id)
		return;

	ddev->state.base = info;

	event.e.length = sizeof(struct sdtx_base_info);
	event.e.code = SDTX_EVENT_BASE_CONNECTION;
	event.v.state = sdtx_translate_base_state(ddev, info.state);
	event.v.base_id = SDTX_BASE_TYPE_SSH(info.base_id);

	sdtx_push_event(ddev, &event.e);
}

/* Must be executed with ddev->write_lock held. */
static void __sdtx_device_state_update_mode(struct sdtx_device *ddev, u8 mode)
{
	struct sdtx_status_event event;
	int tablet;

	/*
	 * Note: This function must be called after updating the base state
	 * via __sdtx_device_state_update_base(), as we rely on the updated
	 * base state value in the validity check below.
	 */

	lockdep_assert_held(&ddev->write_lock);

	if (sdtx_device_mode_invalid(mode, ddev->state.base.state)) {
		dev_dbg(ddev->dev, "device mode is invalid, trying again\n");
		sdtx_update_device_mode(ddev, SDTX_DEVICE_MODE_DELAY_RECHECK);
		return;
	}

	/* Prevent duplicate events. */
	if (ddev->state.device_mode == mode)
		return;

	ddev->state.device_mode = mode;

	/* Send event. */
	event.e.length = sizeof(u16);
	event.e.code = SDTX_EVENT_DEVICE_MODE;
	event.v = mode;

	sdtx_push_event(ddev, &event.e);

	/* Send SW_TABLET_MODE event. */
	tablet = mode != SDTX_DEVICE_MODE_LAPTOP;
	input_report_switch(ddev->mode_switch, SW_TABLET_MODE, tablet);
	input_sync(ddev->mode_switch);
}

/* Must be executed with ddev->write_lock held. */
static void __sdtx_device_state_update_latch(struct sdtx_device *ddev, u8 status)
{
	struct sdtx_status_event event;

	lockdep_assert_held(&ddev->write_lock);

	/* Prevent duplicate events. */
	if (ddev->state.latch_status == status)
		return;

	ddev->state.latch_status = status;

	event.e.length = sizeof(u16);
	event.e.code = SDTX_EVENT_LATCH_STATUS;
	event.v = sdtx_translate_latch_status(ddev, status);

	sdtx_push_event(ddev, &event.e);
}

static void sdtx_device_state_workfn(struct work_struct *work)
{
	struct sdtx_device *ddev = container_of(work, struct sdtx_device, state_work.work);
	struct ssam_bas_base_info base;
	u8 mode, latch;
	int status;

	/* Mark everything as dirty. */
	set_bit(SDTX_DEVICE_DIRTY_BASE_BIT, &ddev->flags);
	set_bit(SDTX_DEVICE_DIRTY_MODE_BIT, &ddev->flags);
	set_bit(SDTX_DEVICE_DIRTY_LATCH_BIT, &ddev->flags);

	/*
	 * Ensure that the state gets marked as dirty before continuing to
	 * query it. Necessary to ensure that clear_bit() calls in
	 * sdtx_notifier() and sdtx_device_mode_workfn() actually clear these
	 * bits if an event is received while updating the state here.
	 */
	smp_mb__after_atomic();

	status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &base);
	if (status) {
		dev_err(ddev->dev, "failed to get base state: %d\n", status);
		return;
	}

	status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &mode);
	if (status) {
		dev_err(ddev->dev, "failed to get device mode: %d\n", status);
		return;
	}

	status = ssam_retry(ssam_bas_get_latch_status, ddev->ctrl, &latch);
	if (status) {
		dev_err(ddev->dev, "failed to get latch status: %d\n", status);
		return;
	}

	mutex_lock(&ddev->write_lock);

	/*
	 * If the respective dirty-bit has been cleared, an event has been
	 * received, updating this state. The queried state may thus be out of
	 * date. At this point, we can safely assume that the state provided
	 * by the event is either up to date, or we're about to receive
	 * another event updating it.
	 */

	if (test_and_clear_bit(SDTX_DEVICE_DIRTY_BASE_BIT, &ddev->flags))
		__sdtx_device_state_update_base(ddev, base);

	if (test_and_clear_bit(SDTX_DEVICE_DIRTY_MODE_BIT, &ddev->flags))
		__sdtx_device_state_update_mode(ddev, mode);

	if (test_and_clear_bit(SDTX_DEVICE_DIRTY_LATCH_BIT, &ddev->flags))
		__sdtx_device_state_update_latch(ddev, latch);

	mutex_unlock(&ddev->write_lock);
}

static void sdtx_update_device_state(struct sdtx_device *ddev, unsigned long delay)
{
	schedule_delayed_work(&ddev->state_work, delay);
}


/* -- Common device initialization. ----------------------------------------- */

static int sdtx_device_init(struct sdtx_device *ddev, struct device *dev,
			    struct ssam_controller *ctrl)
{
	int status, tablet_mode;

	/* Basic initialization. */
	kref_init(&ddev->kref);
	init_rwsem(&ddev->lock);
	ddev->dev = dev;
	ddev->ctrl = ctrl;

	ddev->mdev.minor = MISC_DYNAMIC_MINOR;
	ddev->mdev.name = "surface_dtx";
	ddev->mdev.nodename = "surface/dtx";
	ddev->mdev.fops = &surface_dtx_fops;

	ddev->notif.base.priority = 1;
	ddev->notif.base.fn = sdtx_notifier;
	ddev->notif.event.reg = SSAM_EVENT_REGISTRY_SAM;
	ddev->notif.event.id.target_category = SSAM_SSH_TC_BAS;
	ddev->notif.event.id.instance = 0;
	ddev->notif.event.mask = SSAM_EVENT_MASK_NONE;
	ddev->notif.event.flags = SSAM_EVENT_SEQUENCED;

	init_waitqueue_head(&ddev->waitq);
	mutex_init(&ddev->write_lock);
	init_rwsem(&ddev->client_lock);
	INIT_LIST_HEAD(&ddev->client_list);

	INIT_DELAYED_WORK(&ddev->mode_work, sdtx_device_mode_workfn);
	INIT_DELAYED_WORK(&ddev->state_work, sdtx_device_state_workfn);

	/*
	 * Get current device state. We want to guarantee that events are only
	 * sent when state actually changes. Thus we cannot use special
	 * "uninitialized" values, as that would cause problems when manually
	 * querying the state in surface_dtx_pm_complete(). I.e. we would not
	 * be able to detect state changes there if no change event has been
	 * received between driver initialization and first device suspension.
	 *
	 * Note that we also need to do this before registering the event
	 * notifier, as that may access the state values.
	 */
	status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &ddev->state.base);
	if (status)
		return status;

	status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &ddev->state.device_mode);
	if (status)
		return status;

	status = ssam_retry(ssam_bas_get_latch_status, ddev->ctrl, &ddev->state.latch_status);
	if (status)
		return status;

	/* Set up tablet mode switch. */
	ddev->mode_switch = input_allocate_device();
	if (!ddev->mode_switch)
		return -ENOMEM;

	ddev->mode_switch->name = "Microsoft Surface DTX Device Mode Switch";
	ddev->mode_switch->phys = "ssam/01:11:01:00:00/input0";
	ddev->mode_switch->id.bustype = BUS_HOST;
	ddev->mode_switch->dev.parent = ddev->dev;

	tablet_mode = (ddev->state.device_mode != SDTX_DEVICE_MODE_LAPTOP);
	input_set_capability(ddev->mode_switch, EV_SW, SW_TABLET_MODE);
	input_report_switch(ddev->mode_switch, SW_TABLET_MODE, tablet_mode);

	status = input_register_device(ddev->mode_switch);
	if (status) {
		input_free_device(ddev->mode_switch);
		return status;
	}

	/* Set up event notifier. */
	status = ssam_notifier_register(ddev->ctrl, &ddev->notif);
	if (status)
		goto err_notif;

	/* Register miscdevice. */
	status = misc_register(&ddev->mdev);
	if (status)
		goto err_mdev;

	/*
	 * Update device state in case it has changed between getting the
	 * initial mode and registering the event notifier.
	 */
	sdtx_update_device_state(ddev, 0);
	return 0;

err_mdev:
	ssam_notifier_unregister(ddev->ctrl, &ddev->notif);
	cancel_delayed_work_sync(&ddev->mode_work);
err_notif:
	input_unregister_device(ddev->mode_switch);
	return status;
}

static struct sdtx_device *sdtx_device_create(struct device *dev, struct ssam_controller *ctrl)
{
	struct sdtx_device *ddev;
	int status;

	ddev = kzalloc(sizeof(*ddev), GFP_KERNEL);
	if (!ddev)
		return ERR_PTR(-ENOMEM);

	status = sdtx_device_init(ddev, dev, ctrl);
	if (status) {
		sdtx_device_put(ddev);
		return ERR_PTR(status);
	}

	return ddev;
}

static void sdtx_device_destroy(struct sdtx_device *ddev)
{
	struct sdtx_client *client;

	/*
	 * Mark device as shut-down. Prevent new clients from being added and
	 * new operations from being executed.
	 */
	set_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags);

	/* Disable notifiers, prevent new events from arriving. */
	ssam_notifier_unregister(ddev->ctrl, &ddev->notif);

	/* Stop mode_work, prevent access to mode_switch. */
	cancel_delayed_work_sync(&ddev->mode_work);

	/* Stop state_work. */
	cancel_delayed_work_sync(&ddev->state_work);

	/* With mode_work canceled, we can unregister the mode_switch. */
	input_unregister_device(ddev->mode_switch);

	/* Wake up async clients. */
	down_write(&ddev->client_lock);
	list_for_each_entry(client, &ddev->client_list, node) {
		kill_fasync(&client->fasync, SIGIO, POLL_HUP);
	}
	up_write(&ddev->client_lock);

	/* Wake up blocking clients. */
	wake_up_interruptible(&ddev->waitq);

	/*
	 * Wait for clients to finish their current operation. After this, the
	 * controller and device references are guaranteed to be no longer in
	 * use.
	 */
	down_write(&ddev->lock);
	ddev->dev = NULL;
	ddev->ctrl = NULL;
	up_write(&ddev->lock);

	/* Finally remove the misc-device. */
	misc_deregister(&ddev->mdev);

	/*
	 * We're now guaranteed that surface_dtx_open() won't be called any
	 * more, so we can now drop our reference.
	 */
	sdtx_device_put(ddev);
}


/* -- PM ops. --------------------------------------------------------------- */

#ifdef CONFIG_PM_SLEEP

static void surface_dtx_pm_complete(struct device *dev)
{
	struct sdtx_device *ddev = dev_get_drvdata(dev);

	/*
	 * Normally, the EC will store events while suspended (i.e. in
	 * display-off state) and release them when resumed (i.e. transitioned
	 * to display-on state). During hibernation, however, the EC will be
	 * shut down and does not store events. Furthermore, events might be
	 * dropped during prolonged suspension (it is currently unknown how
	 * big this event buffer is and how it behaves on overruns).
	 *
	 * To prevent any problems, we update the device state here. We do
	 * this delayed to ensure that any events sent by the EC directly
	 * after resuming will be handled first. The delay below has been
	 * chosen (experimentally), so that there should be ample time for
	 * these events to be handled, before we check and, if necessary,
	 * update the state.
	 */
	sdtx_update_device_state(ddev, msecs_to_jiffies(1000));
}

static const struct dev_pm_ops surface_dtx_pm_ops = {
	.complete = surface_dtx_pm_complete,
};

#else /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops surface_dtx_pm_ops = {};

#endif /* CONFIG_PM_SLEEP */


/* -- Platform driver. ------------------------------------------------------ */

static int surface_dtx_platform_probe(struct platform_device *pdev)
{
	struct ssam_controller *ctrl;
	struct sdtx_device *ddev;

	/* Link to EC. */
	ctrl = ssam_client_bind(&pdev->dev);
	if (IS_ERR(ctrl))
		return PTR_ERR(ctrl) == -ENODEV ? -EPROBE_DEFER : PTR_ERR(ctrl);

	ddev = sdtx_device_create(&pdev->dev, ctrl);
	if (IS_ERR(ddev))
		return PTR_ERR(ddev);

	platform_set_drvdata(pdev, ddev);
	return 0;
}

static void surface_dtx_platform_remove(struct platform_device *pdev)
{
	sdtx_device_destroy(platform_get_drvdata(pdev));
}

static const struct acpi_device_id surface_dtx_acpi_match[] = {
	{ "MSHW0133", 0 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, surface_dtx_acpi_match);

static struct platform_driver surface_dtx_platform_driver = {
	.probe = surface_dtx_platform_probe,
	.remove_new = surface_dtx_platform_remove,
	.driver = {
		.name = "surface_dtx_pltf",
		.acpi_match_table = surface_dtx_acpi_match,
		.pm = &surface_dtx_pm_ops,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};


/* -- SSAM device driver. --------------------------------------------------- */

#ifdef CONFIG_SURFACE_AGGREGATOR_BUS

static int surface_dtx_ssam_probe(struct ssam_device *sdev)
{
	struct sdtx_device *ddev;

	ddev = sdtx_device_create(&sdev->dev, sdev->ctrl);
	if (IS_ERR(ddev))
		return PTR_ERR(ddev);

	ssam_device_set_drvdata(sdev, ddev);
	return 0;
}

static void surface_dtx_ssam_remove(struct ssam_device *sdev)
{
	sdtx_device_destroy(ssam_device_get_drvdata(sdev));
}

static const struct ssam_device_id surface_dtx_ssam_match[] = {
	{ SSAM_SDEV(BAS, SAM, 0x00, 0x00) },
	{ },
};
MODULE_DEVICE_TABLE(ssam, surface_dtx_ssam_match);

static struct ssam_device_driver surface_dtx_ssam_driver = {
	.probe = surface_dtx_ssam_probe,
	.remove = surface_dtx_ssam_remove,
	.match_table = surface_dtx_ssam_match,
	.driver = {
		.name = "surface_dtx",
		.pm = &surface_dtx_pm_ops,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};

static int ssam_dtx_driver_register(void)
{
	return ssam_device_driver_register(&surface_dtx_ssam_driver);
}

static void ssam_dtx_driver_unregister(void)
{
	ssam_device_driver_unregister(&surface_dtx_ssam_driver);
}

#else /* CONFIG_SURFACE_AGGREGATOR_BUS */

static int ssam_dtx_driver_register(void)
{
	return 0;
}

static void ssam_dtx_driver_unregister(void)
{
}

#endif /* CONFIG_SURFACE_AGGREGATOR_BUS */


/* -- Module setup. --------------------------------------------------------- */

static int __init surface_dtx_init(void)
{
	int status;

	status = ssam_dtx_driver_register();
	if (status)
		return status;

	status = platform_driver_register(&surface_dtx_platform_driver);
	if (status)
		ssam_dtx_driver_unregister();

	return status;
}
module_init(surface_dtx_init);

static void __exit surface_dtx_exit(void)
{
	platform_driver_unregister(&surface_dtx_platform_driver);
	ssam_dtx_driver_unregister();
}
module_exit(surface_dtx_exit);

MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
MODULE_DESCRIPTION("Detachment-system driver for Surface System Aggregator Module");
MODULE_LICENSE("GPL");