// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus Firmware Management Protocol Driver.
 *
 * Copyright 2016 Google Inc.
 * Copyright 2016 Linaro Ltd.
 */

#include <linux/cdev.h>
#include <linux/completion.h>
#include <linux/firmware.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/ioctl.h>
#include <linux/uaccess.h>
#include <linux/greybus.h>

#include "firmware.h"
#include "greybus_firmware.h"

#define FW_MGMT_TIMEOUT_MS	1000

struct fw_mgmt {
	struct device		*parent;
	struct gb_connection	*connection;
	struct kref		kref;
	struct list_head	node;

	/* Common id-map for interface and backend firmware requests */
	struct ida		id_map;
	struct mutex		mutex;
	struct completion	completion;
	struct cdev		cdev;
	struct device		*class_device;
	dev_t			dev_num;
	unsigned int		timeout_jiffies;
	bool			disabled; /* connection getting disabled */

	/* Interface Firmware specific fields */
	bool			mode_switch_started;
	bool			intf_fw_loaded;
	u8			intf_fw_request_id;
	u8			intf_fw_status;
	u16			intf_fw_major;
	u16			intf_fw_minor;

	/* Backend Firmware specific fields */
	u8			backend_fw_request_id;
	u8			backend_fw_status;
};

/*
 * Number of minor devices this driver supports.
 * There will be exactly one required per Interface.
 */
#define NUM_MINORS		U8_MAX

static const struct class fw_mgmt_class = {
	.name = "gb_fw_mgmt",
};

static dev_t fw_mgmt_dev_num;
static DEFINE_IDA(fw_mgmt_minors_map);
static LIST_HEAD(fw_mgmt_list);
static DEFINE_MUTEX(list_mutex);

static void fw_mgmt_kref_release(struct kref *kref)
{
	struct fw_mgmt *fw_mgmt = container_of(kref, struct fw_mgmt, kref);

	ida_destroy(&fw_mgmt->id_map);
	kfree(fw_mgmt);
}

/*
 * All users of fw_mgmt take a reference (from within list_mutex lock), before
 * they get a pointer to play with. And the structure will be freed only after
 * the last user has put the reference to it.
 */
static void put_fw_mgmt(struct fw_mgmt *fw_mgmt)
{
	kref_put(&fw_mgmt->kref, fw_mgmt_kref_release);
}

/* Caller must call put_fw_mgmt() after using struct fw_mgmt */
static struct fw_mgmt *get_fw_mgmt(struct cdev *cdev)
{
	struct fw_mgmt *fw_mgmt;

	mutex_lock(&list_mutex);

	list_for_each_entry(fw_mgmt, &fw_mgmt_list, node) {
		if (&fw_mgmt->cdev == cdev) {
			kref_get(&fw_mgmt->kref);
			goto unlock;
		}
	}

	fw_mgmt = NULL;

unlock:
	mutex_unlock(&list_mutex);

	return fw_mgmt;
}

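/* Query the Interface for the version and tag of its running firmware */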
static int fw_mgmt_interface_fw_version_operation(struct fw_mgmt *fw_mgmt,
				struct fw_mgmt_ioc_get_intf_version *fw_info)
{
	struct gb_connection *connection = fw_mgmt->connection;
	struct gb_fw_mgmt_interface_fw_version_response response;
	int ret;

	ret = gb_operation_sync(connection,
				GB_FW_MGMT_TYPE_INTERFACE_FW_VERSION, NULL, 0,
				&response, sizeof(response));
	if (ret) {
		dev_err(fw_mgmt->parent,
			"failed to get interface firmware version (%d)\n", ret);
		return ret;
	}

	fw_info->major = le16_to_cpu(response.major);
	fw_info->minor = le16_to_cpu(response.minor);

	strscpy_pad(fw_info->firmware_tag, response.firmware_tag);

	/*
	 * The firmware-tag should be NULL terminated, otherwise throw error but
	 * don't fail.
	 */
	if (fw_info->firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE - 1] != '\0') {
		dev_err(fw_mgmt->parent,
			"fw-version: firmware-tag is not NULL terminated\n");
		fw_info->firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE - 1] = '\0';
	}

	return 0;
}

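/*
 * Ask the Interface to load and validate the Interface Firmware named by
 * @tag. The result arrives later via a firmware-loaded request, which
 * completes fw_mgmt->completion.
 */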
static int fw_mgmt_load_and_validate_operation(struct fw_mgmt *fw_mgmt,
					       u8 load_method, const char *tag)
{
	struct gb_fw_mgmt_load_and_validate_fw_request request;
	int ret;

	if (load_method != GB_FW_LOAD_METHOD_UNIPRO &&
	    load_method != GB_FW_LOAD_METHOD_INTERNAL) {
		dev_err(fw_mgmt->parent,
			"invalid load-method (%d)\n", load_method);
		return -EINVAL;
	}

	request.load_method = load_method;
	strscpy_pad(request.firmware_tag, tag);

	/*
	 * The firmware-tag should be NULL terminated, otherwise throw error and
	 * fail.
	 */
	if (request.firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE - 1] != '\0') {
		dev_err(fw_mgmt->parent, "load-and-validate: firmware-tag is not NULL terminated\n");
		return -EINVAL;
	}

	/* Allocate ids from 1 to 255 (u8-max), 0 is an invalid id */
	ret = ida_alloc_range(&fw_mgmt->id_map, 1, 255, GFP_KERNEL);
	if (ret < 0) {
		dev_err(fw_mgmt->parent, "failed to allocate request id (%d)\n",
			ret);
		return ret;
	}

	fw_mgmt->intf_fw_request_id = ret;
	fw_mgmt->intf_fw_loaded = false;
	request.request_id = ret;

	ret = gb_operation_sync(fw_mgmt->connection,
				GB_FW_MGMT_TYPE_LOAD_AND_VALIDATE_FW, &request,
				sizeof(request), NULL, 0);
	if (ret) {
		ida_free(&fw_mgmt->id_map, fw_mgmt->intf_fw_request_id);
		fw_mgmt->intf_fw_request_id = 0;
		dev_err(fw_mgmt->parent,
			"load and validate firmware request failed (%d)\n",
			ret);
		return ret;
	}

	return 0;
}

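/*
 * Handle the firmware-loaded request sent by the Interface once a
 * load-and-validate operation has finished, and record its outcome.
 */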
static int fw_mgmt_interface_fw_loaded_operation(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct fw_mgmt *fw_mgmt = gb_connection_get_data(connection);
	struct gb_fw_mgmt_loaded_fw_request *request;

	/* No pending load and validate request ? */
	if (!fw_mgmt->intf_fw_request_id) {
		dev_err(fw_mgmt->parent,
			"unexpected firmware loaded request received\n");
		return -ENODEV;
	}

	if (op->request->payload_size != sizeof(*request)) {
		dev_err(fw_mgmt->parent, "illegal size of firmware loaded request (%zu != %zu)\n",
			op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	/* Invalid request-id ? */
	if (request->request_id != fw_mgmt->intf_fw_request_id) {
		dev_err(fw_mgmt->parent, "invalid request id for firmware loaded request (%02u != %02u)\n",
			fw_mgmt->intf_fw_request_id, request->request_id);
		return -ENODEV;
	}

	ida_free(&fw_mgmt->id_map, fw_mgmt->intf_fw_request_id);
	fw_mgmt->intf_fw_request_id = 0;
	fw_mgmt->intf_fw_status = request->status;
	fw_mgmt->intf_fw_major = le16_to_cpu(request->major);
	fw_mgmt->intf_fw_minor = le16_to_cpu(request->minor);

	if (fw_mgmt->intf_fw_status == GB_FW_LOAD_STATUS_FAILED)
		dev_err(fw_mgmt->parent,
			"failed to load interface firmware, status:%02x\n",
			fw_mgmt->intf_fw_status);
	else if (fw_mgmt->intf_fw_status == GB_FW_LOAD_STATUS_VALIDATION_FAILED)
		dev_err(fw_mgmt->parent,
			"failed to validate interface firmware, status:%02x\n",
			fw_mgmt->intf_fw_status);
	else
		fw_mgmt->intf_fw_loaded = true;

	complete(&fw_mgmt->completion);

	return 0;
}

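/* Query the Interface for the version of a backend firmware by its tag */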
static int fw_mgmt_backend_fw_version_operation(struct fw_mgmt *fw_mgmt,
				struct fw_mgmt_ioc_get_backend_version *fw_info)
{
	struct gb_connection *connection = fw_mgmt->connection;
	struct gb_fw_mgmt_backend_fw_version_request request;
	struct gb_fw_mgmt_backend_fw_version_response response;
	int ret;

	strscpy_pad(request.firmware_tag, fw_info->firmware_tag);

	/*
	 * The firmware-tag should be NULL terminated, otherwise throw error and
	 * fail.
	 */
	if (request.firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE - 1] != '\0') {
		dev_err(fw_mgmt->parent, "backend-version: firmware-tag is not NULL terminated\n");
		return -EINVAL;
	}

	ret = gb_operation_sync(connection,
				GB_FW_MGMT_TYPE_BACKEND_FW_VERSION, &request,
				sizeof(request), &response, sizeof(response));
	if (ret) {
		dev_err(fw_mgmt->parent, "failed to get version of %s backend firmware (%d)\n",
			fw_info->firmware_tag, ret);
		return ret;
	}

	fw_info->status = response.status;

	/* Reset version as that should be non-zero only for success case */
	fw_info->major = 0;
	fw_info->minor = 0;

	switch (fw_info->status) {
	case GB_FW_BACKEND_VERSION_STATUS_SUCCESS:
		fw_info->major = le16_to_cpu(response.major);
		fw_info->minor = le16_to_cpu(response.minor);
		break;
	case GB_FW_BACKEND_VERSION_STATUS_NOT_AVAILABLE:
	case GB_FW_BACKEND_VERSION_STATUS_RETRY:
		break;
	case GB_FW_BACKEND_VERSION_STATUS_NOT_SUPPORTED:
		dev_err(fw_mgmt->parent,
			"Firmware with tag %s is not supported by Interface\n",
			fw_info->firmware_tag);
		break;
	default:
		dev_err(fw_mgmt->parent, "Invalid status received: %u\n",
			fw_info->status);
	}

	return 0;
}

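/*
 * Ask the Interface to update the backend firmware named by @tag. The result
 * arrives later via a backend-firmware-updated request, which completes
 * fw_mgmt->completion.
 */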
static int fw_mgmt_backend_fw_update_operation(struct fw_mgmt *fw_mgmt,
					       char *tag)
{
	struct gb_fw_mgmt_backend_fw_update_request request;
	int ret;

	ret = strscpy_pad(request.firmware_tag, tag);

	/*
	 * The firmware-tag should be NULL terminated, otherwise throw error and
	 * fail.
	 */
	if (ret == -E2BIG) {
		dev_err(fw_mgmt->parent, "backend-update: firmware-tag is not NULL terminated\n");
		return -EINVAL;
	}

	/* Allocate ids from 1 to 255 (u8-max), 0 is an invalid id */
	ret = ida_alloc_range(&fw_mgmt->id_map, 1, 255, GFP_KERNEL);
	if (ret < 0) {
		dev_err(fw_mgmt->parent, "failed to allocate request id (%d)\n",
			ret);
		return ret;
	}

	fw_mgmt->backend_fw_request_id = ret;
	request.request_id = ret;

	ret = gb_operation_sync(fw_mgmt->connection,
				GB_FW_MGMT_TYPE_BACKEND_FW_UPDATE, &request,
				sizeof(request), NULL, 0);
	if (ret) {
		ida_free(&fw_mgmt->id_map, fw_mgmt->backend_fw_request_id);
		fw_mgmt->backend_fw_request_id = 0;
		dev_err(fw_mgmt->parent,
			"backend %s firmware update request failed (%d)\n", tag,
			ret);
		return ret;
	}

	return 0;
}

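/*
 * Handle the backend-firmware-updated request sent by the Interface once a
 * backend firmware update has finished, and record its status.
 */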
static int fw_mgmt_backend_fw_updated_operation(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct fw_mgmt *fw_mgmt = gb_connection_get_data(connection);
	struct gb_fw_mgmt_backend_fw_updated_request *request;

	/* No pending backend firmware update request ? */
	if (!fw_mgmt->backend_fw_request_id) {
		dev_err(fw_mgmt->parent, "unexpected backend firmware updated request received\n");
		return -ENODEV;
	}

	if (op->request->payload_size != sizeof(*request)) {
		dev_err(fw_mgmt->parent, "illegal size of backend firmware updated request (%zu != %zu)\n",
			op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	/* Invalid request-id ? */
	if (request->request_id != fw_mgmt->backend_fw_request_id) {
		dev_err(fw_mgmt->parent, "invalid request id for backend firmware updated request (%02u != %02u)\n",
			fw_mgmt->backend_fw_request_id, request->request_id);
		return -ENODEV;
	}

	ida_free(&fw_mgmt->id_map, fw_mgmt->backend_fw_request_id);
	fw_mgmt->backend_fw_request_id = 0;
	fw_mgmt->backend_fw_status = request->status;

	if ((fw_mgmt->backend_fw_status != GB_FW_BACKEND_FW_STATUS_SUCCESS) &&
	    (fw_mgmt->backend_fw_status != GB_FW_BACKEND_FW_STATUS_RETRY))
		dev_err(fw_mgmt->parent,
			"failed to load backend firmware: %02x\n",
			fw_mgmt->backend_fw_status);

	complete(&fw_mgmt->completion);

	return 0;
}

/* Char device fops */

static int fw_mgmt_open(struct inode *inode, struct file *file)
{
	struct fw_mgmt *fw_mgmt = get_fw_mgmt(inode->i_cdev);

	/* fw_mgmt structure can't get freed until file descriptor is closed */
	if (fw_mgmt) {
		file->private_data = fw_mgmt;
		return 0;
	}

	return -ENODEV;
}

static int fw_mgmt_release(struct inode *inode, struct file *file)
{
	struct fw_mgmt *fw_mgmt = file->private_data;

	put_fw_mgmt(fw_mgmt);
	return 0;
}

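/*
 * Core ioctl handler: runs the requested firmware-management operation and
 * copies the results back to userspace. Called with fw_mgmt->mutex held by
 * fw_mgmt_ioctl_unlocked().
 */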
static int fw_mgmt_ioctl(struct fw_mgmt *fw_mgmt, unsigned int cmd,
			 void __user *buf)
{
	struct fw_mgmt_ioc_get_intf_version intf_fw_info;
	struct fw_mgmt_ioc_get_backend_version backend_fw_info;
	struct fw_mgmt_ioc_intf_load_and_validate intf_load;
	struct fw_mgmt_ioc_backend_fw_update backend_update;
	unsigned int timeout;
	int ret;

	/* Reject any operations after mode-switch has started */
	if (fw_mgmt->mode_switch_started)
		return -EBUSY;

	switch (cmd) {
	case FW_MGMT_IOC_GET_INTF_FW:
		ret = fw_mgmt_interface_fw_version_operation(fw_mgmt,
							     &intf_fw_info);
		if (ret)
			return ret;

		if (copy_to_user(buf, &intf_fw_info, sizeof(intf_fw_info)))
			return -EFAULT;

		return 0;
	case FW_MGMT_IOC_GET_BACKEND_FW:
		if (copy_from_user(&backend_fw_info, buf,
				   sizeof(backend_fw_info)))
			return -EFAULT;

		ret = fw_mgmt_backend_fw_version_operation(fw_mgmt,
							   &backend_fw_info);
		if (ret)
			return ret;

		if (copy_to_user(buf, &backend_fw_info,
				 sizeof(backend_fw_info)))
			return -EFAULT;

		return 0;
	case FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE:
		if (copy_from_user(&intf_load, buf, sizeof(intf_load)))
			return -EFAULT;

		ret = fw_mgmt_load_and_validate_operation(fw_mgmt,
				intf_load.load_method, intf_load.firmware_tag);
		if (ret)
			return ret;

		if (!wait_for_completion_timeout(&fw_mgmt->completion,
						 fw_mgmt->timeout_jiffies)) {
			dev_err(fw_mgmt->parent, "timed out waiting for firmware load and validation to finish\n");
			return -ETIMEDOUT;
		}

		intf_load.status = fw_mgmt->intf_fw_status;
		intf_load.major = fw_mgmt->intf_fw_major;
		intf_load.minor = fw_mgmt->intf_fw_minor;

		if (copy_to_user(buf, &intf_load, sizeof(intf_load)))
			return -EFAULT;

		return 0;
	case FW_MGMT_IOC_INTF_BACKEND_FW_UPDATE:
		if (copy_from_user(&backend_update, buf,
				   sizeof(backend_update)))
			return -EFAULT;

		ret = fw_mgmt_backend_fw_update_operation(fw_mgmt,
				backend_update.firmware_tag);
		if (ret)
			return ret;

		if (!wait_for_completion_timeout(&fw_mgmt->completion,
						 fw_mgmt->timeout_jiffies)) {
			dev_err(fw_mgmt->parent, "timed out waiting for backend firmware update to finish\n");
			return -ETIMEDOUT;
		}

		backend_update.status = fw_mgmt->backend_fw_status;

		if (copy_to_user(buf, &backend_update, sizeof(backend_update)))
			return -EFAULT;

		return 0;
	case FW_MGMT_IOC_SET_TIMEOUT_MS:
		if (get_user(timeout, (unsigned int __user *)buf))
			return -EFAULT;

		if (!timeout) {
			dev_err(fw_mgmt->parent, "timeout can't be zero\n");
			return -EINVAL;
		}

		fw_mgmt->timeout_jiffies = msecs_to_jiffies(timeout);

		return 0;
	case FW_MGMT_IOC_MODE_SWITCH:
		if (!fw_mgmt->intf_fw_loaded) {
			dev_err(fw_mgmt->parent,
				"Firmware not loaded for mode-switch\n");
			return -EPERM;
		}

		/*
		 * Disallow new ioctls as the fw-core bundle driver is going to
		 * get disconnected soon and the character device will get
		 * removed.
		 */
		fw_mgmt->mode_switch_started = true;

		ret = gb_interface_request_mode_switch(fw_mgmt->connection->intf);
		if (ret) {
			dev_err(fw_mgmt->parent, "Mode-switch failed: %d\n",
				ret);
			fw_mgmt->mode_switch_started = false;
			return ret;
		}

		return 0;
	default:
		return -ENOTTY;
	}
}
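
/*
 * Illustrative userspace flow (a minimal sketch, not part of this driver):
 * open the per-Interface character device created by
 * gb_fw_mgmt_connection_init() (typically /dev/gb-fw-mgmt-<minor>, depending
 * on udev) and issue the ioctls defined in greybus_firmware.h, e.g. with
 * <fcntl.h>, <sys/ioctl.h> and a userspace copy of that header:
 *
 *	int fd = open("/dev/gb-fw-mgmt-0", O_RDWR);
 *	struct fw_mgmt_ioc_intf_load_and_validate load = {
 *		.load_method = GB_FW_LOAD_METHOD_UNIPRO,
 *	};
 *
 *	strncpy(load.firmware_tag, "s3fw", sizeof(load.firmware_tag) - 1);
 *	if (ioctl(fd, FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE, &load) == 0 &&
 *	    load.status != GB_FW_LOAD_STATUS_FAILED &&
 *	    load.status != GB_FW_LOAD_STATUS_VALIDATION_FAILED)
 *		ioctl(fd, FW_MGMT_IOC_MODE_SWITCH);
 *
 * The device-node name and the firmware tag "s3fw" above are only examples.
 */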

static long fw_mgmt_ioctl_unlocked(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	struct fw_mgmt *fw_mgmt = file->private_data;
	struct gb_bundle *bundle = fw_mgmt->connection->bundle;
	int ret = -ENODEV;

	/*
	 * Serialize ioctls.
	 *
	 * We don't want the user to run several operations in parallel, for
	 * example updating the Interface firmware twice at once for the same
	 * Interface. There is no need to do things in parallel for speed and
	 * this keeps the code simple for now.
	 *
	 * This is also used to protect ->disabled, which is used to check if
	 * the connection is getting disconnected, so that we don't start any
	 * new operations.
	 */
	mutex_lock(&fw_mgmt->mutex);
	if (!fw_mgmt->disabled) {
		ret = gb_pm_runtime_get_sync(bundle);
		if (!ret) {
			ret = fw_mgmt_ioctl(fw_mgmt, cmd, (void __user *)arg);
			gb_pm_runtime_put_autosuspend(bundle);
		}
	}
	mutex_unlock(&fw_mgmt->mutex);

	return ret;
}

static const struct file_operations fw_mgmt_fops = {
	.owner = THIS_MODULE,
	.open = fw_mgmt_open,
	.release = fw_mgmt_release,
	.unlocked_ioctl = fw_mgmt_ioctl_unlocked,
};

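/*
 * Handler for requests sent by the Interface: firmware-loaded and
 * backend-firmware-updated notifications.
 */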
int gb_fw_mgmt_request_handler(struct gb_operation *op)
{
	u8 type = op->type;

	switch (type) {
	case GB_FW_MGMT_TYPE_LOADED_FW:
		return fw_mgmt_interface_fw_loaded_operation(op);
	case GB_FW_MGMT_TYPE_BACKEND_FW_UPDATED:
		return fw_mgmt_backend_fw_updated_operation(op);
	default:
		dev_err(&op->connection->bundle->dev,
			"unsupported request: %u\n", type);
		return -EINVAL;
	}
}

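/*
 * Set up the firmware-management connection: enable it and register a
 * character device (plus a class device) through which userspace drives
 * firmware updates for this Interface.
 */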
int gb_fw_mgmt_connection_init(struct gb_connection *connection)
{
	struct fw_mgmt *fw_mgmt;
	int ret, minor;

	if (!connection)
		return 0;

	fw_mgmt = kzalloc(sizeof(*fw_mgmt), GFP_KERNEL);
	if (!fw_mgmt)
		return -ENOMEM;

	fw_mgmt->parent = &connection->bundle->dev;
	fw_mgmt->timeout_jiffies = msecs_to_jiffies(FW_MGMT_TIMEOUT_MS);
	fw_mgmt->connection = connection;

	gb_connection_set_data(connection, fw_mgmt);
	init_completion(&fw_mgmt->completion);
	ida_init(&fw_mgmt->id_map);
	mutex_init(&fw_mgmt->mutex);
	kref_init(&fw_mgmt->kref);

	mutex_lock(&list_mutex);
	list_add(&fw_mgmt->node, &fw_mgmt_list);
	mutex_unlock(&list_mutex);

	ret = gb_connection_enable(connection);
	if (ret)
		goto err_list_del;

	minor = ida_alloc_max(&fw_mgmt_minors_map, NUM_MINORS - 1, GFP_KERNEL);
	if (minor < 0) {
		ret = minor;
		goto err_connection_disable;
	}

	/* Add a char device to allow userspace to interact with fw-mgmt */
	fw_mgmt->dev_num = MKDEV(MAJOR(fw_mgmt_dev_num), minor);
	cdev_init(&fw_mgmt->cdev, &fw_mgmt_fops);

	ret = cdev_add(&fw_mgmt->cdev, fw_mgmt->dev_num, 1);
	if (ret)
		goto err_remove_ida;

	/* Add a soft link to the previously added char-dev within the bundle */
	fw_mgmt->class_device = device_create(&fw_mgmt_class, fw_mgmt->parent,
					      fw_mgmt->dev_num, NULL,
					      "gb-fw-mgmt-%d", minor);
	if (IS_ERR(fw_mgmt->class_device)) {
		ret = PTR_ERR(fw_mgmt->class_device);
		goto err_del_cdev;
	}

	return 0;

err_del_cdev:
	cdev_del(&fw_mgmt->cdev);
err_remove_ida:
	ida_free(&fw_mgmt_minors_map, minor);
err_connection_disable:
	gb_connection_disable(connection);
err_list_del:
	mutex_lock(&list_mutex);
	list_del(&fw_mgmt->node);
	mutex_unlock(&list_mutex);

	put_fw_mgmt(fw_mgmt);

	return ret;
}

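/*
 * Tear everything down in reverse order: remove the character device, block
 * new ioctls, disable the connection and drop the initial reference.
 */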
void gb_fw_mgmt_connection_exit(struct gb_connection *connection)
{
	struct fw_mgmt *fw_mgmt;

	if (!connection)
		return;

	fw_mgmt = gb_connection_get_data(connection);

	device_destroy(&fw_mgmt_class, fw_mgmt->dev_num);
	cdev_del(&fw_mgmt->cdev);
	ida_free(&fw_mgmt_minors_map, MINOR(fw_mgmt->dev_num));

	/*
	 * Disallow any new ioctl operations on the char device and wait for
	 * existing ones to finish.
	 */
	mutex_lock(&fw_mgmt->mutex);
	fw_mgmt->disabled = true;
	mutex_unlock(&fw_mgmt->mutex);

	/* All pending greybus operations should have finished by now */
	gb_connection_disable(fw_mgmt->connection);

	/* Disallow new users to get access to the fw_mgmt structure */
	mutex_lock(&list_mutex);
	list_del(&fw_mgmt->node);
	mutex_unlock(&list_mutex);

	/*
	 * All current users of fw_mgmt have taken a reference to it by now, so
	 * we can drop our reference; the structure will be freed once the last
	 * user has put theirs.
	 */
	put_fw_mgmt(fw_mgmt);
}

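/* Register the device class and reserve the char device region */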
int fw_mgmt_init(void)
{
	int ret;

	ret = class_register(&fw_mgmt_class);
	if (ret)
		return ret;

	ret = alloc_chrdev_region(&fw_mgmt_dev_num, 0, NUM_MINORS,
				  "gb_fw_mgmt");
	if (ret)
		goto err_remove_class;

	return 0;

err_remove_class:
	class_unregister(&fw_mgmt_class);
	return ret;
}

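/* Undo fw_mgmt_init(): release the char device region, class and minors map */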
void fw_mgmt_exit(void)
{
	unregister_chrdev_region(fw_mgmt_dev_num, NUM_MINORS);
	class_unregister(&fw_mgmt_class);
	ida_destroy(&fw_mgmt_minors_map);
}