// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004 IBM Corporation
 * Authors:
 * Leendert van Doorn <leendert@watson.ibm.com>
 * Dave Safford <safford@watson.ibm.com>
 * Reiner Sailer <sailer@watson.ibm.com>
 * Kylene Hall <kjhall@us.ibm.com>
 *
 * Copyright (C) 2013 Obsidian Research Corp
 * Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
 *
 * Device file system interface to the TPM
 */
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include "tpm.h"
#include "tpm-dev.h"

static struct workqueue_struct *tpm_dev_wq;

static ssize_t tpm_dev_transmit(struct tpm_chip *chip, struct tpm_space *space,
				u8 *buf, size_t bufsiz)
{
	struct tpm_header *header = (void *)buf;
	ssize_t ret, len;

	if (chip->flags & TPM_CHIP_FLAG_TPM2)
		tpm2_end_auth_session(chip);

	ret = tpm2_prepare_space(chip, space, buf, bufsiz);
	/* If the command is not implemented by the TPM, synthesize a
	 * response with a TPM2_RC_COMMAND_CODE return for user-space.
	 */
	if (ret == -EOPNOTSUPP) {
		header->length = cpu_to_be32(sizeof(*header));
		header->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS);
		header->return_code = cpu_to_be32(TPM2_RC_COMMAND_CODE |
						  TSS2_RESMGR_TPM_RC_LAYER);
		ret = sizeof(*header);
	}
	if (ret)
		goto out_rc;

	len = tpm_transmit(chip, buf, bufsiz);
	if (len < 0)
		ret = len;

	if (!ret)
		ret = tpm2_commit_space(chip, space, buf, &len);
	else
		tpm2_flush_space(chip);

out_rc:
	return ret ? ret : len;
}

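/*
 * Worker for O_NONBLOCK writes: transmits the command queued by
 * tpm_common_write(), stores the response length (or an error code)
 * in response_length, and wakes up any poll()/read() waiters.
 */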
static void tpm_dev_async_work(struct work_struct *work)
{
	struct file_priv *priv =
			container_of(work, struct file_priv, async_work);
	ssize_t ret;

	mutex_lock(&priv->buffer_mutex);
	priv->command_enqueued = false;
	ret = tpm_try_get_ops(priv->chip);
	if (ret) {
		priv->response_length = ret;
		goto out;
	}

	ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer,
			       sizeof(priv->data_buffer));
	tpm_put_ops(priv->chip);

	/*
	 * If ret is > 0 then tpm_dev_transmit returned the size of the
	 * response. If ret is < 0 then tpm_dev_transmit failed and
	 * returned an error code.
	 */
	if (ret != 0) {
		priv->response_length = ret;
		mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
	}
out:
	mutex_unlock(&priv->buffer_mutex);
	wake_up_interruptible(&priv->async_wait);
}

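/*
 * Fires when user space has not read a response within two minutes
 * (see the mod_timer() calls). The cleanup itself is deferred to
 * tpm_timeout_work() because the buffer mutex cannot be taken in
 * timer context.
 */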
static void user_reader_timeout(struct timer_list *t)
{
	struct file_priv *priv = from_timer(priv, t, user_read_timer);

	pr_warn("TPM user space timeout is deprecated (pid=%d)\n",
		task_tgid_nr(current));

	schedule_work(&priv->timeout_work);
}

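/*
 * Discards an unread response: the buffer is zeroed so stale data
 * cannot leak into a later read, and any waiters are woken up.
 */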
static void tpm_timeout_work(struct work_struct *work)
{
	struct file_priv *priv = container_of(work, struct file_priv,
					      timeout_work);

	mutex_lock(&priv->buffer_mutex);
	priv->response_read = true;
	priv->response_length = 0;
	memset(priv->data_buffer, 0, sizeof(priv->data_buffer));
	mutex_unlock(&priv->buffer_mutex);
	wake_up_interruptible(&priv->async_wait);
}

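/*
 * Initializes the per-open file state (buffer mutex, read timeout timer,
 * work items and wait queue) and attaches it to the struct file. Shared
 * by the open() handlers of both /dev/tpm<n> and /dev/tpmrm<n>.
 */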
void tpm_common_open(struct file *file, struct tpm_chip *chip,
		     struct file_priv *priv, struct tpm_space *space)
{
	priv->chip = chip;
	priv->space = space;
	priv->response_read = true;

	mutex_init(&priv->buffer_mutex);
	timer_setup(&priv->user_read_timer, user_reader_timeout, 0);
	INIT_WORK(&priv->timeout_work, tpm_timeout_work);
	INIT_WORK(&priv->async_work, tpm_dev_async_work);
	init_waitqueue_head(&priv->async_wait);
	file->private_data = priv;
}

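/*
 * Copies as much of the pending response as fits into the user buffer.
 * Partial reads are supported; consumed bytes are zeroed out of
 * data_buffer.
 */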
ssize_t tpm_common_read(struct file *file, char __user *buf,
			size_t size, loff_t *off)
{
	struct file_priv *priv = file->private_data;
	ssize_t ret_size = 0;
	int rc;

	mutex_lock(&priv->buffer_mutex);

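	/*
	 * response_length counts the unread response bytes and *off is
	 * the offset of the next unread byte in data_buffer; both are
	 * reset once the whole response has been consumed.
	 */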
	if (priv->response_length) {
		priv->response_read = true;

		ret_size = min_t(ssize_t, size, priv->response_length);
		if (ret_size <= 0) {
			priv->response_length = 0;
			goto out;
		}

		rc = copy_to_user(buf, priv->data_buffer + *off, ret_size);
		if (rc) {
			memset(priv->data_buffer, 0, TPM_BUFSIZE);
			priv->response_length = 0;
			ret_size = -EFAULT;
		} else {
			memset(priv->data_buffer + *off, 0, ret_size);
			priv->response_length -= ret_size;
			*off += ret_size;
		}
	}

out:
	if (!priv->response_length) {
		*off = 0;
		del_timer_sync(&priv->user_read_timer);
		flush_work(&priv->timeout_work);
	}
	mutex_unlock(&priv->buffer_mutex);
	return ret_size;
}

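/*
 * Accepts one marshaled TPM command per write(), e.g. the 12-byte
 * TPM2_GetRandom request 80 01 00 00 00 0c 00 00 01 7b 00 08.
 * In blocking mode the command is transmitted synchronously and the
 * response is left in data_buffer for a subsequent read(); with
 * O_NONBLOCK the transmission is deferred to tpm_dev_async_work().
 */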
ssize_t tpm_common_write(struct file *file, const char __user *buf,
			 size_t size, loff_t *off)
{
	struct file_priv *priv = file->private_data;
	int ret = 0;

	if (size > TPM_BUFSIZE)
		return -E2BIG;

	mutex_lock(&priv->buffer_mutex);

	/* Cannot perform a write until the read has cleared either via
	 * tpm_read or a user_read_timer timeout. This also prevents split
	 * buffered writes from blocking here.
	 */
	if ((!priv->response_read && priv->response_length) ||
	    priv->command_enqueued) {
		ret = -EBUSY;
		goto out;
	}

	if (copy_from_user(priv->data_buffer, buf, size)) {
		ret = -EFAULT;
		goto out;
	}

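	/*
	 * The command must hold at least a 16-bit tag and a 32-bit length
	 * field, and the big-endian length at offset 2 must not claim
	 * more bytes than user space actually wrote.
	 */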
	if (size < 6 ||
	    size < be32_to_cpu(*((__be32 *)(priv->data_buffer + 2)))) {
		ret = -EINVAL;
		goto out;
	}

	priv->response_length = 0;
	priv->response_read = false;
	*off = 0;

	/*
	 * If in nonblocking mode, schedule an async job to send the
	 * command and return the size immediately. In case of error the
	 * error code will be returned by the subsequent read call.
	 */
	if (file->f_flags & O_NONBLOCK) {
		priv->command_enqueued = true;
		queue_work(tpm_dev_wq, &priv->async_work);
		mutex_unlock(&priv->buffer_mutex);
		return size;
	}

	/* atomic tpm command send and result receive. We only hold the ops
	 * lock during this period so that the tpm can be unregistered even if
	 * the char dev is held open.
	 */
	if (tpm_try_get_ops(priv->chip)) {
		ret = -EPIPE;
		goto out;
	}

	ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer,
			       sizeof(priv->data_buffer));
	tpm_put_ops(priv->chip);

	if (ret > 0) {
		priv->response_length = ret;
		mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
		ret = size;
	}
out:
	mutex_unlock(&priv->buffer_mutex);
	return ret;
}

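/*
 * poll() support: the device is readable while an unconsumed response
 * remains in data_buffer, and writable otherwise.
 */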
__poll_t tpm_common_poll(struct file *file, poll_table *wait)
{
	struct file_priv *priv = file->private_data;
	__poll_t mask = 0;

	poll_wait(file, &priv->async_wait, wait);
	mutex_lock(&priv->buffer_mutex);

	/*
	 * response_length indicates whether there is still a response
	 * (or part of one) to be consumed. Partial reads decrease it
	 * by the number of bytes read, and a write resets it to zero.
	 */
	if (priv->response_length)
		mask = EPOLLIN | EPOLLRDNORM;
	else
		mask = EPOLLOUT | EPOLLWRNORM;

	mutex_unlock(&priv->buffer_mutex);
	return mask;
}

/*
 * Called on file close
 */
void tpm_common_release(struct file *file, struct file_priv *priv)
{
	flush_work(&priv->async_work);
	del_timer_sync(&priv->user_read_timer);
	flush_work(&priv->timeout_work);
	file->private_data = NULL;
	priv->response_length = 0;
}

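/*
 * Allocates the shared workqueue used for O_NONBLOCK command
 * transmission; WQ_MEM_RECLAIM guarantees forward progress even
 * under memory pressure.
 */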
int __init tpm_dev_common_init(void)
{
	tpm_dev_wq = alloc_workqueue("tpm_dev_wq", WQ_MEM_RECLAIM, 0);

	return !tpm_dev_wq ? -ENOMEM : 0;
}

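/*
 * Destroys the workqueue; the NULL check keeps this safe if
 * tpm_dev_common_init() never ran or failed.
 */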
void __exit tpm_dev_common_exit(void)
{
	if (tpm_dev_wq) {
		destroy_workqueue(tpm_dev_wq);
		tpm_dev_wq = NULL;
	}
}