// SPDX-License-Identifier: GPL-2.0
/*
 * Intel MAX10 Board Management Controller Secure Update Driver
 *
 * Copyright (C) 2019-2022 Intel Corporation. All rights reserved.
 *
 */
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/mfd/intel-m10-bmc.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct m10bmc_sec;

struct m10bmc_sec_ops {
	int (*rsu_status)(struct m10bmc_sec *sec);
};

struct m10bmc_sec {
	struct device *dev;
	struct intel_m10bmc *m10bmc;
	struct fw_upload *fwl;
	char *fw_name;
	u32 fw_name_id;
	bool cancel_request;
	const struct m10bmc_sec_ops *ops;
};

static DEFINE_XARRAY_ALLOC(fw_upload_xa);

/* Root Entry Hash (REH) support */
#define REH_SHA256_SIZE		32
#define REH_SHA384_SIZE		48
#define REH_MAGIC		GENMASK(15, 0)
#define REH_SHA_NUM_BYTES	GENMASK(31, 16)

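/*
 * Write firmware data to the BMC flash staging area. Use the dedicated
 * flash bulk-write ops when the BMC provides them; otherwise fall back to
 * regmap bulk writes, handling any trailing bytes smaller than the
 * register stride separately.
 */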
static int m10bmc_sec_write(struct m10bmc_sec *sec, const u8 *buf, u32 offset, u32 size)
{
	struct intel_m10bmc *m10bmc = sec->m10bmc;
	unsigned int stride = regmap_get_reg_stride(m10bmc->regmap);
	u32 write_count = size / stride;
	u32 leftover_offset = write_count * stride;
	u32 leftover_size = size - leftover_offset;
	u32 leftover_tmp = 0;
	int ret;

	if (sec->m10bmc->flash_bulk_ops)
		return sec->m10bmc->flash_bulk_ops->write(m10bmc, buf, offset, size);

	if (WARN_ON_ONCE(stride > sizeof(leftover_tmp)))
		return -EINVAL;

	ret = regmap_bulk_write(m10bmc->regmap, M10BMC_STAGING_BASE + offset,
				buf + offset, write_count);
	if (ret)
		return ret;

	/* If size is not aligned to stride, handle the remainder bytes with regmap_write() */
	if (leftover_size) {
		memcpy(&leftover_tmp, buf + leftover_offset, leftover_size);
		ret = regmap_write(m10bmc->regmap, M10BMC_STAGING_BASE + offset + leftover_offset,
				   leftover_tmp);
		if (ret)
			return ret;
	}

	return 0;
}

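/*
 * Read from BMC flash. Prefer the flash bulk-read ops when available;
 * otherwise use regmap bulk reads and copy out any unaligned remainder.
 */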
static int m10bmc_sec_read(struct m10bmc_sec *sec, u8 *buf, u32 addr, u32 size)
{
	struct intel_m10bmc *m10bmc = sec->m10bmc;
	unsigned int stride = regmap_get_reg_stride(m10bmc->regmap);
	u32 read_count = size / stride;
	u32 leftover_offset = read_count * stride;
	u32 leftover_size = size - leftover_offset;
	u32 leftover_tmp;
	int ret;

	if (sec->m10bmc->flash_bulk_ops)
		return sec->m10bmc->flash_bulk_ops->read(m10bmc, buf, addr, size);

	if (WARN_ON_ONCE(stride > sizeof(leftover_tmp)))
		return -EINVAL;

	ret = regmap_bulk_read(m10bmc->regmap, addr, buf, read_count);
	if (ret)
		return ret;

	/* If size is not aligned to stride, handle the remainder bytes with regmap_read() */
	if (leftover_size) {
		ret = regmap_read(m10bmc->regmap, addr + leftover_offset, &leftover_tmp);
		if (ret)
			return ret;
		memcpy(buf + leftover_offset, &leftover_tmp, leftover_size);
	}

	return 0;
}

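/*
 * Emit the programmed root entry hash as a hex string, or a message when
 * no hash has been programmed. The hash length (SHA-256 or SHA-384) is
 * taken from the magic word at prog_addr.
 */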
static ssize_t
show_root_entry_hash(struct device *dev, u32 exp_magic,
		     u32 prog_addr, u32 reh_addr, char *buf)
{
	struct m10bmc_sec *sec = dev_get_drvdata(dev);
	int sha_num_bytes, i, ret, cnt = 0;
	u8 hash[REH_SHA384_SIZE];
	u32 magic;

	ret = m10bmc_sec_read(sec, (u8 *)&magic, prog_addr, sizeof(magic));
	if (ret)
		return ret;

	if (FIELD_GET(REH_MAGIC, magic) != exp_magic)
		return sysfs_emit(buf, "hash not programmed\n");

	sha_num_bytes = FIELD_GET(REH_SHA_NUM_BYTES, magic) / 8;
	if (sha_num_bytes != REH_SHA256_SIZE &&
	    sha_num_bytes != REH_SHA384_SIZE) {
		dev_err(sec->dev, "%s bad sha num bytes %d\n", __func__,
			sha_num_bytes);
		return -EINVAL;
	}

	ret = m10bmc_sec_read(sec, hash, reh_addr, sha_num_bytes);
	if (ret) {
		dev_err(dev, "failed to read root entry hash\n");
		return ret;
	}

	for (i = 0; i < sha_num_bytes; i++)
		cnt += sprintf(buf + cnt, "%02x", hash[i]);
	cnt += sprintf(buf + cnt, "\n");

	return cnt;
}

#define DEVICE_ATTR_SEC_REH_RO(_name) \
static ssize_t _name##_root_entry_hash_show(struct device *dev, \
					    struct device_attribute *attr, \
					    char *buf) \
{ \
	struct m10bmc_sec *sec = dev_get_drvdata(dev); \
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map; \
	\
	return show_root_entry_hash(dev, csr_map->_name##_magic, \
				    csr_map->_name##_prog_addr, \
				    csr_map->_name##_reh_addr, \
				    buf); \
} \
static DEVICE_ATTR_RO(_name##_root_entry_hash)

DEVICE_ATTR_SEC_REH_RO(bmc);
DEVICE_ATTR_SEC_REH_RO(sr);
DEVICE_ATTR_SEC_REH_RO(pr);

#define CSK_BIT_LEN		128U
#define CSK_32ARRAY_SIZE	DIV_ROUND_UP(CSK_BIT_LEN, 32)

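/*
 * Report the canceled code signing key (CSK) IDs as a bitmap. A canceled
 * key is recorded as a cleared bit in flash, so the vector is complemented
 * before the canceled IDs are printed.
 */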
static ssize_t
show_canceled_csk(struct device *dev, u32 addr, char *buf)
{
	unsigned int i, size = CSK_32ARRAY_SIZE * sizeof(u32);
	struct m10bmc_sec *sec = dev_get_drvdata(dev);
	DECLARE_BITMAP(csk_map, CSK_BIT_LEN);
	__le32 csk_le32[CSK_32ARRAY_SIZE];
	u32 csk32[CSK_32ARRAY_SIZE];
	int ret;

	ret = m10bmc_sec_read(sec, (u8 *)&csk_le32, addr, size);
	if (ret) {
		dev_err(sec->dev, "failed to read CSK vector\n");
		return ret;
	}

	for (i = 0; i < CSK_32ARRAY_SIZE; i++)
		csk32[i] = le32_to_cpu(csk_le32[i]);

	bitmap_from_arr32(csk_map, csk32, CSK_BIT_LEN);
	bitmap_complement(csk_map, csk_map, CSK_BIT_LEN);
	return bitmap_print_to_pagebuf(1, buf, csk_map, CSK_BIT_LEN);
}

#define DEVICE_ATTR_SEC_CSK_RO(_name) \
static ssize_t _name##_canceled_csks_show(struct device *dev, \
					  struct device_attribute *attr, \
					  char *buf) \
{ \
	struct m10bmc_sec *sec = dev_get_drvdata(dev); \
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map; \
	\
	return show_canceled_csk(dev, \
				 csr_map->_name##_prog_addr + CSK_VEC_OFFSET, \
				 buf); \
} \
static DEVICE_ATTR_RO(_name##_canceled_csks)

#define CSK_VEC_OFFSET 0x34

DEVICE_ATTR_SEC_CSK_RO(bmc);
DEVICE_ATTR_SEC_CSK_RO(sr);
DEVICE_ATTR_SEC_CSK_RO(pr);

#define FLASH_COUNT_SIZE 4096	/* count stored as inverted bit vector */

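/*
 * The staged-update count equals the number of cleared bits in the
 * inverted bit vector read from flash.
 */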
static ssize_t flash_count_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct m10bmc_sec *sec = dev_get_drvdata(dev);
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
	unsigned int num_bits;
	u8 *flash_buf;
	int cnt, ret;

	num_bits = FLASH_COUNT_SIZE * 8;

	flash_buf = kmalloc(FLASH_COUNT_SIZE, GFP_KERNEL);
	if (!flash_buf)
		return -ENOMEM;

	ret = m10bmc_sec_read(sec, flash_buf, csr_map->rsu_update_counter,
			      FLASH_COUNT_SIZE);
	if (ret) {
		dev_err(sec->dev, "failed to read flash count\n");
		goto exit_free;
	}
	cnt = num_bits - bitmap_weight((unsigned long *)flash_buf, num_bits);

exit_free:
	kfree(flash_buf);

	return ret ? : sysfs_emit(buf, "%u\n", cnt);
}
static DEVICE_ATTR_RO(flash_count);

static struct attribute *m10bmc_security_attrs[] = {
	&dev_attr_flash_count.attr,
	&dev_attr_bmc_root_entry_hash.attr,
	&dev_attr_sr_root_entry_hash.attr,
	&dev_attr_pr_root_entry_hash.attr,
	&dev_attr_sr_canceled_csks.attr,
	&dev_attr_pr_canceled_csks.attr,
	&dev_attr_bmc_canceled_csks.attr,
	NULL,
};

static struct attribute_group m10bmc_security_attr_group = {
	.name = "security",
	.attrs = m10bmc_security_attrs,
};

static const struct attribute_group *m10bmc_sec_attr_groups[] = {
	&m10bmc_security_attr_group,
	NULL,
};

static void log_error_regs(struct m10bmc_sec *sec, u32 doorbell)
{
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
	u32 auth_result;

	dev_err(sec->dev, "Doorbell: 0x%08x\n", doorbell);

	if (!m10bmc_sys_read(sec->m10bmc, csr_map->auth_result, &auth_result))
		dev_err(sec->dev, "RSU auth result: 0x%08x\n", auth_result);
}

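/* On N3000 and D5005 BMCs, the RSU status is a field in the doorbell register. */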
static int m10bmc_sec_n3000_rsu_status(struct m10bmc_sec *sec)
{
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
	u32 doorbell;
	int ret;

	ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
	if (ret)
		return ret;

	return FIELD_GET(DRBL_RSU_STATUS, doorbell);
}

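/* On N6000 BMCs, the RSU status is reported in the auth_result register. */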
static int m10bmc_sec_n6000_rsu_status(struct m10bmc_sec *sec)
{
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
	u32 auth_result;
	int ret;

	ret = m10bmc_sys_read(sec->m10bmc, csr_map->auth_result, &auth_result);
	if (ret)
		return ret;

	return FIELD_GET(AUTH_RESULT_RSU_STATUS, auth_result);
}

static bool rsu_status_ok(u32 status)
{
	return (status == RSU_STAT_NORMAL ||
		status == RSU_STAT_NIOS_OK ||
		status == RSU_STAT_USER_OK ||
		status == RSU_STAT_FACTORY_OK);
}

static bool rsu_progress_done(u32 progress)
{
	return (progress == RSU_PROG_IDLE ||
		progress == RSU_PROG_RSU_DONE);
}

static bool rsu_progress_busy(u32 progress)
{
	return (progress == RSU_PROG_AUTHENTICATING ||
		progress == RSU_PROG_COPYING ||
		progress == RSU_PROG_UPDATE_CANCEL ||
		progress == RSU_PROG_PROGRAM_KEY_HASH);
}

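/* Read the doorbell and return both the RSU progress and status values. */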
static int m10bmc_sec_progress_status(struct m10bmc_sec *sec, u32 *doorbell_reg,
				      u32 *progress, u32 *status)
{
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
	int ret;

	ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, doorbell_reg);
	if (ret)
		return ret;

	ret = sec->ops->rsu_status(sec);
	if (ret < 0)
		return ret;

	*status = ret;
	*progress = rsu_prog(*doorbell_reg);

	return 0;
}

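/* A new update may only start while no other RSU operation is in progress. */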
static enum fw_upload_err rsu_check_idle(struct m10bmc_sec *sec)
{
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
	u32 doorbell;
	int ret;

	ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
	if (ret)
		return FW_UPLOAD_ERR_RW_ERROR;

	if (!rsu_progress_done(rsu_prog(doorbell))) {
		log_error_regs(sec, doorbell);
		return FW_UPLOAD_ERR_BUSY;
	}

	return FW_UPLOAD_ERR_NONE;
}

static inline bool rsu_start_done(u32 doorbell_reg, u32 progress, u32 status)
{
	if (doorbell_reg & DRBL_RSU_REQUEST)
		return false;

	if (status == RSU_STAT_ERASE_FAIL || status == RSU_STAT_WEAROUT)
		return true;

	if (!rsu_progress_done(progress))
		return true;

	return false;
}

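/*
 * Request an RSU from the BMC firmware: raise DRBL_RSU_REQUEST, then poll
 * until the request is acknowledged or an erase failure or flash wearout
 * is reported.
 */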
static enum fw_upload_err rsu_update_init(struct m10bmc_sec *sec)
{
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
	u32 doorbell_reg, progress, status;
	int ret, err;

	ret = m10bmc_sys_update_bits(sec->m10bmc, csr_map->doorbell,
				     DRBL_RSU_REQUEST | DRBL_HOST_STATUS,
				     DRBL_RSU_REQUEST |
				     FIELD_PREP(DRBL_HOST_STATUS,
						HOST_STATUS_IDLE));
	if (ret)
		return FW_UPLOAD_ERR_RW_ERROR;

	ret = read_poll_timeout(m10bmc_sec_progress_status, err,
				err < 0 || rsu_start_done(doorbell_reg, progress, status),
				NIOS_HANDSHAKE_INTERVAL_US,
				NIOS_HANDSHAKE_TIMEOUT_US,
				false,
				sec, &doorbell_reg, &progress, &status);

	if (ret == -ETIMEDOUT) {
		log_error_regs(sec, doorbell_reg);
		return FW_UPLOAD_ERR_TIMEOUT;
	} else if (err) {
		return FW_UPLOAD_ERR_RW_ERROR;
	}

	if (status == RSU_STAT_WEAROUT) {
		dev_warn(sec->dev, "Excessive flash update count detected\n");
		return FW_UPLOAD_ERR_WEAROUT;
	} else if (status == RSU_STAT_ERASE_FAIL) {
		log_error_regs(sec, doorbell_reg);
		return FW_UPLOAD_ERR_HW_ERROR;
	}

	return FW_UPLOAD_ERR_NONE;
}

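/* Wait for the BMC to finish its PREPARE phase and report READY. */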
static enum fw_upload_err rsu_prog_ready(struct m10bmc_sec *sec)
{
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
	unsigned long poll_timeout;
	u32 doorbell, progress;
	int ret;

	ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
	if (ret)
		return FW_UPLOAD_ERR_RW_ERROR;

	poll_timeout = jiffies + msecs_to_jiffies(RSU_PREP_TIMEOUT_MS);
	while (rsu_prog(doorbell) == RSU_PROG_PREPARE) {
		msleep(RSU_PREP_INTERVAL_MS);
		if (time_after(jiffies, poll_timeout))
			break;

		ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
		if (ret)
			return FW_UPLOAD_ERR_RW_ERROR;
	}

	progress = rsu_prog(doorbell);
	if (progress == RSU_PROG_PREPARE) {
		log_error_regs(sec, doorbell);
		return FW_UPLOAD_ERR_TIMEOUT;
	} else if (progress != RSU_PROG_READY) {
		log_error_regs(sec, doorbell);
		return FW_UPLOAD_ERR_HW_ERROR;
	}

	return FW_UPLOAD_ERR_NONE;
}

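/*
 * Signal HOST_STATUS_WRITE_DONE to the BMC, wait for it to leave the READY
 * state, then verify that the reported RSU status is still good.
 */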
static enum fw_upload_err rsu_send_data(struct m10bmc_sec *sec)
{
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
	u32 doorbell_reg, status;
	int ret;

	ret = m10bmc_sys_update_bits(sec->m10bmc, csr_map->doorbell,
				     DRBL_HOST_STATUS,
				     FIELD_PREP(DRBL_HOST_STATUS,
						HOST_STATUS_WRITE_DONE));
	if (ret)
		return FW_UPLOAD_ERR_RW_ERROR;

	ret = regmap_read_poll_timeout(sec->m10bmc->regmap,
				       csr_map->base + csr_map->doorbell,
				       doorbell_reg,
				       rsu_prog(doorbell_reg) != RSU_PROG_READY,
				       NIOS_HANDSHAKE_INTERVAL_US,
				       NIOS_HANDSHAKE_TIMEOUT_US);

	if (ret == -ETIMEDOUT) {
		log_error_regs(sec, doorbell_reg);
		return FW_UPLOAD_ERR_TIMEOUT;
	} else if (ret) {
		return FW_UPLOAD_ERR_RW_ERROR;
	}

	ret = sec->ops->rsu_status(sec);
	if (ret < 0)
		return FW_UPLOAD_ERR_HW_ERROR;
	status = ret;

	if (!rsu_status_ok(status)) {
		log_error_regs(sec, doorbell_reg);
		return FW_UPLOAD_ERR_HW_ERROR;
	}

	return FW_UPLOAD_ERR_NONE;
}

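/*
 * Check whether the update has finished: 0 on completion, -EAGAIN while
 * the BMC is still busy, and a fatal error otherwise.
 */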
static int rsu_check_complete(struct m10bmc_sec *sec, u32 *doorbell_reg)
{
	u32 progress, status;

	if (m10bmc_sec_progress_status(sec, doorbell_reg, &progress, &status))
		return -EIO;

	if (!rsu_status_ok(status))
		return -EINVAL;

	if (rsu_progress_done(progress))
		return 0;

	if (rsu_progress_busy(progress))
		return -EAGAIN;

	return -EINVAL;
}

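/* An update can only be aborted while the BMC is in the READY state. */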
static enum fw_upload_err rsu_cancel(struct m10bmc_sec *sec)
{
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
	u32 doorbell;
	int ret;

	ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
	if (ret)
		return FW_UPLOAD_ERR_RW_ERROR;

	if (rsu_prog(doorbell) != RSU_PROG_READY)
		return FW_UPLOAD_ERR_BUSY;

	ret = m10bmc_sys_update_bits(sec->m10bmc, csr_map->doorbell,
				     DRBL_HOST_STATUS,
				     FIELD_PREP(DRBL_HOST_STATUS,
						HOST_STATUS_ABORT_RSU));
	if (ret)
		return FW_UPLOAD_ERR_RW_ERROR;

	return FW_UPLOAD_ERR_CANCELED;
}

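/*
 * fw_upload .prepare op: take the flash write lock when needed, check that
 * no RSU is already running, and walk the BMC through the request/prepare
 * handshake until it is ready to accept staging data.
 */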
static enum fw_upload_err m10bmc_sec_prepare(struct fw_upload *fwl,
					     const u8 *data, u32 size)
{
	struct m10bmc_sec *sec = fwl->dd_handle;
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
	u32 ret;

	sec->cancel_request = false;

	if (!size || size > csr_map->staging_size)
		return FW_UPLOAD_ERR_INVALID_SIZE;

	if (sec->m10bmc->flash_bulk_ops)
		if (sec->m10bmc->flash_bulk_ops->lock_write(sec->m10bmc))
			return FW_UPLOAD_ERR_BUSY;

	ret = rsu_check_idle(sec);
	if (ret != FW_UPLOAD_ERR_NONE)
		goto unlock_flash;

	m10bmc_fw_state_set(sec->m10bmc, M10BMC_FW_STATE_SEC_UPDATE_PREPARE);

	ret = rsu_update_init(sec);
	if (ret != FW_UPLOAD_ERR_NONE)
		goto fw_state_exit;

	ret = rsu_prog_ready(sec);
	if (ret != FW_UPLOAD_ERR_NONE)
		goto fw_state_exit;

	if (sec->cancel_request) {
		ret = rsu_cancel(sec);
		goto fw_state_exit;
	}

	m10bmc_fw_state_set(sec->m10bmc, M10BMC_FW_STATE_SEC_UPDATE_WRITE);

	return FW_UPLOAD_ERR_NONE;

fw_state_exit:
	m10bmc_fw_state_set(sec->m10bmc, M10BMC_FW_STATE_NORMAL);

unlock_flash:
	if (sec->m10bmc->flash_bulk_ops)
		sec->m10bmc->flash_bulk_ops->unlock_write(sec->m10bmc);
	return ret;
}

#define WRITE_BLOCK_SIZE 0x4000	/* Default write-block size is 0x4000 bytes */

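/*
 * fw_upload .write op: copy one block (at most WRITE_BLOCK_SIZE bytes) of
 * firmware data into the BMC staging area while the BMC remains in READY.
 */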
static enum fw_upload_err m10bmc_sec_fw_write(struct fw_upload *fwl, const u8 *data,
					      u32 offset, u32 size, u32 *written)
{
	struct m10bmc_sec *sec = fwl->dd_handle;
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
	struct intel_m10bmc *m10bmc = sec->m10bmc;
	u32 blk_size, doorbell;
	int ret;

	if (sec->cancel_request)
		return rsu_cancel(sec);

	ret = m10bmc_sys_read(m10bmc, csr_map->doorbell, &doorbell);
	if (ret) {
		return FW_UPLOAD_ERR_RW_ERROR;
	} else if (rsu_prog(doorbell) != RSU_PROG_READY) {
		log_error_regs(sec, doorbell);
		return FW_UPLOAD_ERR_HW_ERROR;
	}

	WARN_ON_ONCE(WRITE_BLOCK_SIZE % regmap_get_reg_stride(m10bmc->regmap));
	blk_size = min_t(u32, WRITE_BLOCK_SIZE, size);
	ret = m10bmc_sec_write(sec, data, offset, blk_size);
	if (ret)
		return FW_UPLOAD_ERR_RW_ERROR;

	*written = blk_size;
	return FW_UPLOAD_ERR_NONE;
}

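/*
 * fw_upload .poll_complete op: tell the BMC that all data has been written
 * and poll until programming of the new image completes or times out.
 */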
static enum fw_upload_err m10bmc_sec_poll_complete(struct fw_upload *fwl)
{
	struct m10bmc_sec *sec = fwl->dd_handle;
	unsigned long poll_timeout;
	u32 doorbell, result;
	int ret;

	if (sec->cancel_request)
		return rsu_cancel(sec);

	m10bmc_fw_state_set(sec->m10bmc, M10BMC_FW_STATE_SEC_UPDATE_PROGRAM);

	result = rsu_send_data(sec);
	if (result != FW_UPLOAD_ERR_NONE)
		return result;

	poll_timeout = jiffies + msecs_to_jiffies(RSU_COMPLETE_TIMEOUT_MS);
	do {
		msleep(RSU_COMPLETE_INTERVAL_MS);
		ret = rsu_check_complete(sec, &doorbell);
	} while (ret == -EAGAIN && !time_after(jiffies, poll_timeout));

	if (ret == -EAGAIN) {
		log_error_regs(sec, doorbell);
		return FW_UPLOAD_ERR_TIMEOUT;
	} else if (ret == -EIO) {
		return FW_UPLOAD_ERR_RW_ERROR;
	} else if (ret) {
		log_error_regs(sec, doorbell);
		return FW_UPLOAD_ERR_HW_ERROR;
	}

	return FW_UPLOAD_ERR_NONE;
}

/*
 * m10bmc_sec_cancel() may be called asynchronously with an on-going update.
 * All other functions are called sequentially in a single thread. To avoid
 * contention on register accesses, m10bmc_sec_cancel() must only update
 * the cancel_request flag. Other functions will check this flag and handle
 * the cancel request synchronously.
 */
static void m10bmc_sec_cancel(struct fw_upload *fwl)
{
	struct m10bmc_sec *sec = fwl->dd_handle;

	sec->cancel_request = true;
}

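/* fw_upload .cleanup op: abort any pending RSU and release the flash lock. */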
static void m10bmc_sec_cleanup(struct fw_upload *fwl)
{
	struct m10bmc_sec *sec = fwl->dd_handle;

	(void)rsu_cancel(sec);

	m10bmc_fw_state_set(sec->m10bmc, M10BMC_FW_STATE_NORMAL);

	if (sec->m10bmc->flash_bulk_ops)
		sec->m10bmc->flash_bulk_ops->unlock_write(sec->m10bmc);
}

static const struct fw_upload_ops m10bmc_ops = {
	.prepare = m10bmc_sec_prepare,
	.write = m10bmc_sec_fw_write,
	.poll_complete = m10bmc_sec_poll_complete,
	.cancel = m10bmc_sec_cancel,
	.cleanup = m10bmc_sec_cleanup,
};

static const struct m10bmc_sec_ops m10sec_n3000_ops = {
	.rsu_status = m10bmc_sec_n3000_rsu_status,
};

static const struct m10bmc_sec_ops m10sec_n6000_ops = {
	.rsu_status = m10bmc_sec_n6000_rsu_status,
};

#define SEC_UPDATE_LEN_MAX 32
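/*
 * Allocate a unique "secure-updateN" name and register with the firmware
 * upload framework, which provides the sysfs interface for image transfer.
 */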
static int m10bmc_sec_probe(struct platform_device *pdev)
{
	char buf[SEC_UPDATE_LEN_MAX];
	struct m10bmc_sec *sec;
	struct fw_upload *fwl;
	unsigned int len;
	int ret;

	sec = devm_kzalloc(&pdev->dev, sizeof(*sec), GFP_KERNEL);
	if (!sec)
		return -ENOMEM;

	sec->dev = &pdev->dev;
	sec->m10bmc = dev_get_drvdata(pdev->dev.parent);
	sec->ops = (struct m10bmc_sec_ops *)platform_get_device_id(pdev)->driver_data;
	dev_set_drvdata(&pdev->dev, sec);

	ret = xa_alloc(&fw_upload_xa, &sec->fw_name_id, sec,
		       xa_limit_32b, GFP_KERNEL);
	if (ret)
		return ret;

	len = scnprintf(buf, SEC_UPDATE_LEN_MAX, "secure-update%d",
			sec->fw_name_id);
	sec->fw_name = kmemdup_nul(buf, len, GFP_KERNEL);
	if (!sec->fw_name) {
		ret = -ENOMEM;
		goto fw_name_fail;
	}

	fwl = firmware_upload_register(THIS_MODULE, sec->dev, sec->fw_name,
				       &m10bmc_ops, sec);
	if (IS_ERR(fwl)) {
		dev_err(sec->dev, "Firmware Upload driver failed to start\n");
		ret = PTR_ERR(fwl);
		goto fw_uploader_fail;
	}

	sec->fwl = fwl;
	return 0;

fw_uploader_fail:
	kfree(sec->fw_name);
fw_name_fail:
	xa_erase(&fw_upload_xa, sec->fw_name_id);
	return ret;
}

static void m10bmc_sec_remove(struct platform_device *pdev)
{
	struct m10bmc_sec *sec = dev_get_drvdata(&pdev->dev);

	firmware_upload_unregister(sec->fwl);
	kfree(sec->fw_name);
	xa_erase(&fw_upload_xa, sec->fw_name_id);
}

static const struct platform_device_id intel_m10bmc_sec_ids[] = {
	{
		.name = "n3000bmc-sec-update",
		.driver_data = (kernel_ulong_t)&m10sec_n3000_ops,
	},
	{
		.name = "d5005bmc-sec-update",
		.driver_data = (kernel_ulong_t)&m10sec_n3000_ops,
	},
	{
		.name = "n6000bmc-sec-update",
		.driver_data = (kernel_ulong_t)&m10sec_n6000_ops,
	},
	{ }
};
MODULE_DEVICE_TABLE(platform, intel_m10bmc_sec_ids);

static struct platform_driver intel_m10bmc_sec_driver = {
	.probe = m10bmc_sec_probe,
	.remove_new = m10bmc_sec_remove,
	.driver = {
		.name = "intel-m10bmc-sec-update",
		.dev_groups = m10bmc_sec_attr_groups,
	},
	.id_table = intel_m10bmc_sec_ids,
};
module_platform_driver(intel_m10bmc_sec_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel MAX10 BMC Secure Update");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(INTEL_M10_BMC_CORE);