1 // SPDX-License-Identifier: GPL-2.0-only OR MIT
2 /*
3  * Bluetooth HCI driver for Broadcom 4377/4378/4387/4388 devices attached via PCIe
4  *
5  * Copyright (C) The Asahi Linux Contributors
6  */
7 
8 #include <linux/async.h>
9 #include <linux/bitfield.h>
10 #include <linux/completion.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/dmi.h>
13 #include <linux/firmware.h>
14 #include <linux/module.h>
15 #include <linux/msi.h>
16 #include <linux/of.h>
17 #include <linux/pci.h>
18 #include <linux/printk.h>
19 
20 #include <linux/unaligned.h>
21 
22 #include <net/bluetooth/bluetooth.h>
23 #include <net/bluetooth/hci_core.h>
24 
/* Supported chip generations; used to index bcm4377_hw_variants */
enum bcm4377_chip {
	BCM4377 = 0,
	BCM4378,
	BCM4387,
	BCM4388,
};
31 
32 #define BCM4377_DEVICE_ID 0x5fa0
33 #define BCM4378_DEVICE_ID 0x5f69
34 #define BCM4387_DEVICE_ID 0x5f71
35 #define BCM4388_DEVICE_ID 0x5f72
36 
37 #define BCM4377_TIMEOUT msecs_to_jiffies(1000)
38 #define BCM4377_BOOT_TIMEOUT msecs_to_jiffies(5000)
39 
40 /*
41  * These devices only support DMA transactions inside a 32bit window
42  * (possibly to avoid 64 bit arithmetic). The window size cannot exceed
43  * 0xffffffff but is always aligned down to the previous 0x200 byte boundary
44  * which effectively limits the window to [start, start+0xfffffe00].
45  * We just limit the DMA window to [0, 0xfffffe00] to make sure we don't
46  * run into this limitation.
47  */
48 #define BCM4377_DMA_MASK 0xfffffe00
49 
50 #define BCM4377_PCIECFG_BAR0_WINDOW1	   0x80
51 #define BCM4377_PCIECFG_BAR0_WINDOW2	   0x70
52 #define BCM4377_PCIECFG_BAR0_CORE2_WINDOW1 0x74
53 #define BCM4377_PCIECFG_BAR0_CORE2_WINDOW2 0x78
54 #define BCM4377_PCIECFG_BAR2_WINDOW	   0x84
55 
56 #define BCM4377_PCIECFG_BAR0_CORE2_WINDOW1_DEFAULT 0x18011000
57 #define BCM4377_PCIECFG_BAR2_WINDOW_DEFAULT	   0x19000000
58 
59 #define BCM4377_PCIECFG_SUBSYSTEM_CTRL 0x88
60 
61 #define BCM4377_BAR0_FW_DOORBELL 0x140
62 #define BCM4377_BAR0_RTI_CONTROL 0x144
63 
64 #define BCM4377_BAR0_SLEEP_CONTROL	      0x150
65 #define BCM4377_BAR0_SLEEP_CONTROL_UNQUIESCE  0
66 #define BCM4377_BAR0_SLEEP_CONTROL_AWAKE      2
67 #define BCM4377_BAR0_SLEEP_CONTROL_QUIESCE    3
68 
69 #define BCM4377_BAR0_DOORBELL	    0x174
70 #define BCM4377_BAR0_DOORBELL_VALUE GENMASK(31, 16)
71 #define BCM4377_BAR0_DOORBELL_IDX   GENMASK(15, 8)
72 #define BCM4377_BAR0_DOORBELL_RING  BIT(5)
73 
74 #define BCM4377_BAR0_HOST_WINDOW_LO   0x590
75 #define BCM4377_BAR0_HOST_WINDOW_HI   0x594
76 #define BCM4377_BAR0_HOST_WINDOW_SIZE 0x598
77 
78 #define BCM4377_BAR2_BOOTSTAGE 0x200454
79 
80 #define BCM4377_BAR2_FW_LO   0x200478
81 #define BCM4377_BAR2_FW_HI   0x20047c
82 #define BCM4377_BAR2_FW_SIZE 0x200480
83 
84 #define BCM4377_BAR2_CONTEXT_ADDR_LO 0x20048c
85 #define BCM4377_BAR2_CONTEXT_ADDR_HI 0x200450
86 
87 #define BCM4377_BAR2_RTI_STATUS	     0x20045c
88 #define BCM4377_BAR2_RTI_WINDOW_LO   0x200494
89 #define BCM4377_BAR2_RTI_WINDOW_HI   0x200498
90 #define BCM4377_BAR2_RTI_WINDOW_SIZE 0x20049c
91 
92 #define BCM4377_OTP_SIZE	  0xe0
93 #define BCM4377_OTP_SYS_VENDOR	  0x15
94 #define BCM4377_OTP_CIS		  0x80
95 #define BCM4377_OTP_VENDOR_HDR	  0x00000008
96 #define BCM4377_OTP_MAX_PARAM_LEN 16
97 
98 #define BCM4377_N_TRANSFER_RINGS   9
99 #define BCM4377_N_COMPLETION_RINGS 6
100 
101 #define BCM4377_MAX_RING_SIZE 256
102 
103 #define BCM4377_MSGID_GENERATION GENMASK(15, 8)
104 #define BCM4377_MSGID_ID	 GENMASK(7, 0)
105 
106 #define BCM4377_RING_N_ENTRIES 128
107 
108 #define BCM4377_CONTROL_MSG_SIZE		   0x34
109 #define BCM4377_XFER_RING_MAX_INPLACE_PAYLOAD_SIZE (4 * 0xff)
110 
111 #define MAX_ACL_PAYLOAD_SIZE   (HCI_MAX_FRAME_SIZE + HCI_ACL_HDR_SIZE)
112 #define MAX_SCO_PAYLOAD_SIZE   (HCI_MAX_SCO_SIZE + HCI_SCO_HDR_SIZE)
113 #define MAX_EVENT_PAYLOAD_SIZE (HCI_MAX_EVENT_SIZE + HCI_EVENT_HDR_SIZE)
114 
/* Types of parameter blocks found while parsing the OTP CIS region */
enum bcm4377_otp_params_type {
	BCM4377_OTP_BOARD_PARAMS,
	BCM4377_OTP_CHIP_PARAMS
};
119 
/* Transfer ring indexes; these values are hardcoded in the device firmware */
enum bcm4377_transfer_ring_id {
	BCM4377_XFER_RING_CONTROL = 0,
	BCM4377_XFER_RING_HCI_H2D = 1,
	BCM4377_XFER_RING_HCI_D2H = 2,
	BCM4377_XFER_RING_SCO_H2D = 3,
	BCM4377_XFER_RING_SCO_D2H = 4,
	BCM4377_XFER_RING_ACL_H2D = 5,
	BCM4377_XFER_RING_ACL_D2H = 6,
};
129 
/* Completion ring indexes; these values are hardcoded in the device firmware */
enum bcm4377_completion_ring_id {
	BCM4377_ACK_RING_CONTROL = 0,
	BCM4377_ACK_RING_HCI_ACL = 1,
	BCM4377_EVENT_RING_HCI_ACL = 2,
	BCM4377_ACK_RING_SCO = 3,
	BCM4377_EVENT_RING_SCO = 4,
};
137 
/*
 * Doorbell indexes used with bcm4377_ring_doorbell(); note that index 5
 * is not used.
 */
enum bcm4377_doorbell {
	BCM4377_DOORBELL_CONTROL = 0,
	BCM4377_DOORBELL_HCI_H2D = 1,
	BCM4377_DOORBELL_HCI_D2H = 2,
	BCM4377_DOORBELL_ACL_H2D = 3,
	BCM4377_DOORBELL_ACL_D2H = 4,
	BCM4377_DOORBELL_SCO = 6,
};
146 
147 /*
148  * Transfer ring entry
149  *
150  * flags: Flags to indicate if the payload is appended or mapped
151  * len: Payload length
152  * payload: Optional payload DMA address
153  * id: Message id to recognize the answer in the completion ring entry
154  */
struct bcm4377_xfer_ring_entry {
/* payload was DMA-mapped out-of-place; its address is in ->payload */
#define BCM4377_XFER_RING_FLAG_PAYLOAD_MAPPED	 BIT(0)
/* payload is stored in-place in the footer directly after this entry */
#define BCM4377_XFER_RING_FLAG_PAYLOAD_IN_FOOTER BIT(1)
	u8 flags;	/* BCM4377_XFER_RING_FLAG_* */
	__le16 len;	/* payload length in bytes */
	u8 _unk0;
	__le64 payload;	/* DMA address; only valid if PAYLOAD_MAPPED is set */
	__le16 id;	/* message id echoed back in the completion entry */
	u8 _unk1[2];
} __packed;
static_assert(sizeof(struct bcm4377_xfer_ring_entry) == 0x10);
166 
167 /*
168  * Completion ring entry
169  *
170  * flags: Flags to indicate if the payload is appended or mapped. If the payload
171  *        is mapped it can be found in the buffer of the corresponding transfer
172  *        ring message.
173  * ring_id: Transfer ring ID which required this message
174  * msg_id: Message ID specified in transfer ring entry
175  * len: Payload length
176  */
struct bcm4377_completion_ring_entry {
	u8 flags;	/* BCM4377_XFER_RING_FLAG_* for the payload location */
	u8 _unk0;
	__le16 ring_id;	/* transfer ring this completion belongs to */
	__le16 msg_id;	/* message id from the original transfer ring entry */
	__le32 len;	/* payload length in bytes */
	u8 _unk1[6];
} __packed;
static_assert(sizeof(struct bcm4377_completion_ring_entry) == 0x10);
186 
187 enum bcm4377_control_message_type {
188 	BCM4377_CONTROL_MSG_CREATE_XFER_RING = 1,
189 	BCM4377_CONTROL_MSG_CREATE_COMPLETION_RING = 2,
190 	BCM4377_CONTROL_MSG_DESTROY_XFER_RING = 3,
191 	BCM4377_CONTROL_MSG_DESTROY_COMPLETION_RING = 4,
192 };
193 
194 /*
195  * Control message used to create a completion ring
196  *
197  * msg_type: Must be BCM4377_CONTROL_MSG_CREATE_COMPLETION_RING
198  * header_size: Unknown, but probably reserved space in front of the entry
199  * footer_size: Number of 32 bit words reserved for payloads after the entry
200  * id/id_again: Completion ring index
201  * ring_iova: DMA address of the ring buffer
202  * n_elements: Number of elements inside the ring buffer
203  * msi: MSI index, doesn't work for all rings though and should be zero
204  * intmod_delay: Unknown delay
205  * intmod_bytes: Unknown
206  */
struct bcm4377_create_completion_ring_msg {
	u8 msg_type;	/* must be BCM4377_CONTROL_MSG_CREATE_COMPLETION_RING */
	u8 header_size;
	u8 footer_size;
	u8 _unk0;
	__le16 id;
	__le16 id_again;	/* duplicate of ->id, required by the firmware */
	__le64 ring_iova;
	__le16 n_elements;
	__le32 unk;
	u8 _unk1[6];
	__le16 msi;
	__le16 intmod_delay;
	__le32 intmod_bytes;
	__le16 _unk2;
	__le32 _unk3;
	u8 _unk4[10];
} __packed;
static_assert(sizeof(struct bcm4377_create_completion_ring_msg) ==
	      BCM4377_CONTROL_MSG_SIZE);
227 
228 /*
229  * Control ring message used to destroy a completion ring
230  *
231  * msg_type: Must be BCM4377_CONTROL_MSG_DESTROY_COMPLETION_RING
232  * ring_id: Completion ring to be destroyed
233  */
struct bcm4377_destroy_completion_ring_msg {
	u8 msg_type;	/* must be BCM4377_CONTROL_MSG_DESTROY_COMPLETION_RING */
	u8 _pad0;
	__le16 ring_id;	/* completion ring to tear down */
	u8 _pad1[48];	/* pad to BCM4377_CONTROL_MSG_SIZE */
} __packed;
static_assert(sizeof(struct bcm4377_destroy_completion_ring_msg) ==
	      BCM4377_CONTROL_MSG_SIZE);
242 
243 /*
244  * Control message used to create a transfer ring
245  *
246  * msg_type: Must be BCM4377_CONTROL_MSG_CREATE_XFER_RING
247  * header_size: Number of 32 bit words reserved for unknown content before the
248  *              entry
249  * footer_size: Number of 32 bit words reserved for payloads after the entry
250  * ring_id/ring_id_again: Transfer ring index
251  * ring_iova: DMA address of the ring buffer
252  * n_elements: Number of elements inside the ring buffer
253  * completion_ring_id: Completion ring index for acknowledgements and events
254  * doorbell: Doorbell index used to notify device of new entries
255  * flags: Transfer ring flags
256  *          - virtual: set if there is no associated shared memory and only the
257  *                     corresponding completion ring is used
258  *          - sync: only set for the SCO rings
259  */
struct bcm4377_create_transfer_ring_msg {
	u8 msg_type;	/* must be BCM4377_CONTROL_MSG_CREATE_XFER_RING */
	u8 header_size;
	u8 footer_size;
	u8 _unk0;
	__le16 ring_id;
	__le16 ring_id_again;	/* duplicate of ->ring_id, required by firmware */
	__le64 ring_iova;
	u8 _unk1[8];
	__le16 n_elements;
	__le16 completion_ring_id;
	__le16 doorbell;
/* ring has no shared-memory buffer; only its completion ring is used */
#define BCM4377_XFER_RING_FLAG_VIRTUAL BIT(7)
/* only set for the SCO rings */
#define BCM4377_XFER_RING_FLAG_SYNC    BIT(8)
	__le16 flags;
	u8 _unk2[20];
} __packed;
static_assert(sizeof(struct bcm4377_create_transfer_ring_msg) ==
	      BCM4377_CONTROL_MSG_SIZE);
279 
280 /*
281  * Control ring message used to destroy a transfer ring
282  *
283  * msg_type: Must be BCM4377_CONTROL_MSG_DESTROY_XFER_RING
284  * ring_id: Transfer ring to be destroyed
285  */
struct bcm4377_destroy_transfer_ring_msg {
	u8 msg_type;	/* must be BCM4377_CONTROL_MSG_DESTROY_XFER_RING */
	u8 _pad0;
	__le16 ring_id;	/* transfer ring to tear down */
	u8 _pad1[48];	/* pad to BCM4377_CONTROL_MSG_SIZE */
} __packed;
static_assert(sizeof(struct bcm4377_destroy_transfer_ring_msg) ==
	      BCM4377_CONTROL_MSG_SIZE);
294 
295 /*
296  * "Converged IPC" context struct used to make the device aware of all other
297  * shared memory structures. A pointer to this structure is configured inside a
298  * MMIO register.
299  *
300  * version: Protocol version, must be 2.
301  * size: Size of this structure, must be 0x68.
302  * enabled_caps: Enabled capabilities. Unknown bitfield but should be 2.
303  * peripheral_info_addr: DMA address for a 0x20 buffer to which the device will
304  *                       write unknown contents
305  * {completion,xfer}_ring_{tails,heads}_addr: DMA pointers to ring heads/tails
306  * n_completion_rings: Number of completion rings, the firmware only works if
307  *                     this is set to BCM4377_N_COMPLETION_RINGS.
308  * n_xfer_rings: Number of transfer rings, the firmware only works if
309  *               this is set to BCM4377_N_TRANSFER_RINGS.
310  * control_completion_ring_addr: Control completion ring buffer DMA address
311  * control_xfer_ring_addr: Control transfer ring buffer DMA address
312  * control_xfer_ring_n_entries: Number of control transfer ring entries
313  * control_completion_ring_n_entries: Number of control completion ring entries
314  * control_xfer_ring_doorbell: Control transfer ring doorbell
315  * control_completion_ring_doorbell: Control completion ring doorbell,
316  *                                   must be set to 0xffff
 * control_xfer_ring_msi: Control transfer ring MSI index, must be 0
318  * control_completion_ring_msi: Control completion ring MSI index, must be 0.
319  * control_xfer_ring_header_size: Number of 32 bit words reserved in front of
320  *                                every control transfer ring entry
321  * control_xfer_ring_footer_size: Number of 32 bit words reserved after every
322  *                                control transfer ring entry
323  * control_completion_ring_header_size: Number of 32 bit words reserved in front
324  *                                      of every control completion ring entry
325  * control_completion_ring_footer_size: Number of 32 bit words reserved after
326  *                                      every control completion ring entry
327  * scratch_pad: Optional scratch pad DMA address
328  * scratch_pad_size: Scratch pad size
329  */
struct bcm4377_context {
	__le16 version;	/* protocol version, must be 2 */
	__le16 size;	/* sizeof(struct bcm4377_context), i.e. 0x68 */
	__le32 enabled_caps;

	__le64 peripheral_info_addr;

	/* ring heads and tails (see struct bcm4377_ring_state) */
	__le64 completion_ring_heads_addr;
	__le64 xfer_ring_tails_addr;
	__le64 completion_ring_tails_addr;
	__le64 xfer_ring_heads_addr;
	__le16 n_completion_rings;	/* must be BCM4377_N_COMPLETION_RINGS */
	__le16 n_xfer_rings;		/* must be BCM4377_N_TRANSFER_RINGS */

	/* control ring configuration */
	__le64 control_completion_ring_addr;
	__le64 control_xfer_ring_addr;
	__le16 control_xfer_ring_n_entries;
	__le16 control_completion_ring_n_entries;
	__le16 control_xfer_ring_doorbell;
	__le16 control_completion_ring_doorbell;	/* must be 0xffff */
	__le16 control_xfer_ring_msi;			/* must be 0 */
	__le16 control_completion_ring_msi;		/* must be 0 */
	u8 control_xfer_ring_header_size;
	u8 control_xfer_ring_footer_size;
	u8 control_completion_ring_header_size;
	u8 control_completion_ring_footer_size;

	__le16 _unk0;
	__le16 _unk1;

	__le64 scratch_pad;
	__le32 scratch_pad_size;

	__le32 _unk3;
} __packed;
static_assert(sizeof(struct bcm4377_context) == 0x68);
368 
#define BCM4378_CALIBRATION_CHUNK_SIZE 0xe6
/* HCI vendor command payload carrying one chunk of calibration data */
struct bcm4378_hci_send_calibration_cmd {
	u8 unk;
	__le16 blocks_left;	/* chunks remaining after this one */
	u8 data[BCM4378_CALIBRATION_CHUNK_SIZE];
} __packed;
375 
#define BCM4378_PTB_CHUNK_SIZE 0xcf
/* HCI vendor command payload carrying one chunk of "PTB" data */
struct bcm4378_hci_send_ptb_cmd {
	__le16 blocks_left;	/* chunks remaining after this one */
	u8 data[BCM4378_PTB_CHUNK_SIZE];
} __packed;
381 
382 /*
383  * Shared memory structure used to store the ring head and tail pointers.
384  */
struct bcm4377_ring_state {
	/* polled by the driver in bcm4377_poll_completion_ring() */
	__le16 completion_ring_head[BCM4377_N_COMPLETION_RINGS];
	/* advanced by the driver after each completion entry is handled */
	__le16 completion_ring_tail[BCM4377_N_COMPLETION_RINGS];
	/* advanced by the driver when (re)queueing transfer ring entries */
	__le16 xfer_ring_head[BCM4377_N_TRANSFER_RINGS];
	/* read by the driver to detect full rings */
	__le16 xfer_ring_tail[BCM4377_N_TRANSFER_RINGS];
};
391 
392 /*
393  * A transfer ring can be used in two configurations:
394  *  1) Send control or HCI messages to the device which are then acknowledged
395  *     in the corresponding completion ring
396  *  2) Receiving HCI frames from the devices. In this case the transfer ring
397  *     itself contains empty messages that are acknowledged once data is
398  *     available from the device. If the payloads fit inside the footers
399  *     of the completion ring the transfer ring can be configured to be
400  *     virtual such that it has no ring buffer.
401  *
402  * ring_id: ring index hardcoded in the firmware
403  * doorbell: doorbell index to notify device of new entries
404  * payload_size: optional in-place payload size
405  * mapped_payload_size: optional out-of-place payload size
406  * completion_ring: index of corresponding completion ring
407  * n_entries: number of entries inside this ring
408  * generation: ring generation; incremented on hci_open to detect stale messages
409  * sync: set to true for SCO rings
410  * virtual: set to true if this ring has no entries and is just required to
411  *          setup a corresponding completion ring for device->host messages
412  * d2h_buffers_only: set to true if this ring is only used to provide large
413  *                   buffers used by device->host messages in the completion
414  *                   ring
415  * allow_wait: allow to wait for messages to be acknowledged
416  * enabled: true once the ring has been created and can be used
417  * ring: ring buffer for entries (struct bcm4377_xfer_ring_entry)
418  * ring_dma: DMA address for ring entry buffer
419  * payloads: payload buffer for mapped_payload_size payloads
 * payloads_dma: DMA address for payload buffer
421  * events: pointer to array of completions if waiting is allowed
422  * msgids: bitmap to keep track of used message ids
 * lock: Spinlock to protect access to ring structures used in the irq handler
424  */
struct bcm4377_transfer_ring {
	enum bcm4377_transfer_ring_id ring_id;
	enum bcm4377_doorbell doorbell;
	size_t payload_size;		/* in-place (footer) payload size */
	size_t mapped_payload_size;	/* out-of-place (DMA) payload size */
	u8 completion_ring;
	u16 n_entries;
	u8 generation;	/* bumped on hci_open to detect stale messages */

	bool sync;
	bool virtual;
	bool d2h_buffers_only;
	bool allow_wait;
	bool enabled;

	void *ring;
	dma_addr_t ring_dma;

	void *payloads;		/* n_entries * mapped_payload_size buffer */
	dma_addr_t payloads_dma;

	struct completion **events;	/* only allocated if allow_wait */
	DECLARE_BITMAP(msgids, BCM4377_MAX_RING_SIZE);
	spinlock_t lock;
};
450 
451 /*
 * A completion ring can be used to either acknowledge messages sent in
453  * the corresponding transfer ring or to receive messages associated with the
454  * transfer ring. When used to receive messages the transfer ring either
455  * has no ring buffer and is only advanced ("virtual transfer ring") or it
456  * only contains empty DMA buffers to be used for the payloads.
457  *
458  * ring_id: completion ring id, hardcoded in firmware
459  * payload_size: optional payload size after each entry
460  * delay: unknown delay
461  * n_entries: number of entries in this ring
462  * enabled: true once the ring has been created and can be used
463  * ring: ring buffer for entries (struct bcm4377_completion_ring_entry)
464  * ring_dma: DMA address of ring buffer
465  * transfer_rings: bitmap of corresponding transfer ring ids
466  */
struct bcm4377_completion_ring {
	enum bcm4377_completion_ring_id ring_id;
	u16 payload_size;	/* footer bytes after each entry */
	u16 delay;
	u16 n_entries;
	bool enabled;

	void *ring;
	dma_addr_t ring_dma;

	/* bitmap of transfer ring ids allowed to complete into this ring */
	unsigned long transfer_rings;
};
479 
480 struct bcm4377_data;
481 
482 /*
483  * Chip-specific configuration struct
484  *
485  * id: Chip id (e.g. 0x4377 for BCM4377)
486  * otp_offset: Offset to the start of the OTP inside BAR0
487  * bar0_window1: Backplane address mapped to the first window in BAR0
488  * bar0_window2: Backplane address mapped to the second window in BAR0
489  * bar0_core2_window2: Optional backplane address mapped to the second core's
490  *                     second window in BAR0
491  * has_bar0_core2_window2: Set to true if this chip requires the second core's
492  *                         second window to be configured
493  * bar2_offset: Offset to the start of the variables in BAR2
494  * clear_pciecfg_subsystem_ctrl_bit19: Set to true if bit 19 in the
495  *                                     vendor-specific subsystem control
496  *                                     register has to be cleared
497  * disable_aspm: Set to true if ASPM must be disabled due to hardware errata
498  * broken_ext_scan: Set to true if the chip erroneously claims to support
499  *                  extended scanning
500  * broken_mws_transport_config: Set to true if the chip erroneously claims to
501  *                              support MWS Transport Configuration
502  * broken_le_ext_adv_report_phy: Set to true if this chip stuffs flags inside
503  *                               reserved bits of Primary/Secondary_PHY inside
504  *                               LE Extended Advertising Report events which
505  *                               have to be ignored
506  * send_calibration: Optional callback to send calibration data
507  * send_ptb: Callback to send "PTB" regulatory/calibration data
508  */
struct bcm4377_hw {
	unsigned int id;

	u32 otp_offset;

	u32 bar0_window1;
	u32 bar0_window2;
	u32 bar0_core2_window2;
	u32 bar2_offset;

	unsigned long has_bar0_core2_window2 : 1;
	unsigned long clear_pciecfg_subsystem_ctrl_bit19 : 1;
	unsigned long disable_aspm : 1;
	unsigned long broken_ext_scan : 1;
	unsigned long broken_mws_transport_config : 1;
	/* presumably: chip erroneously claims LE Coded PHY support — not
	 * covered by the kernel-doc comment above; confirm against callers */
	unsigned long broken_le_coded : 1;
	unsigned long broken_le_ext_adv_report_phy : 1;

	int (*send_calibration)(struct bcm4377_data *bcm4377);
	int (*send_ptb)(struct bcm4377_data *bcm4377,
			const struct firmware *fw);
};
531 
532 static const struct bcm4377_hw bcm4377_hw_variants[];
533 static const struct dmi_system_id bcm4377_dmi_board_table[];
534 
535 /*
536  * Private struct associated with each device containing global state
537  *
538  * pdev: Pointer to associated struct pci_dev
 * hdev: Pointer to associated struct hci_dev
540  * bar0: iomem pointing to BAR0
 * bar2: iomem pointing to BAR2
542  * bootstage: Current value of the bootstage
543  * rti_status: Current "RTI" status value
544  * hw: Pointer to chip-specific struct bcm4377_hw
545  * taurus_cal_blob: "Taurus" calibration blob used for some chips
546  * taurus_cal_size: "Taurus" calibration blob size
547  * taurus_beamforming_cal_blob: "Taurus" beamforming calibration blob used for
548  *                              some chips
549  * taurus_beamforming_cal_size: "Taurus" beamforming calibration blob size
550  * stepping: Chip stepping read from OTP; used for firmware selection
551  * vendor: Antenna vendor read from OTP; used for firmware selection
552  * board_type: Board type from FDT or DMI match; used for firmware selection
553  * event: Event for changed bootstage or rti_status; used for booting firmware
554  * ctx: "Converged IPC" context
555  * ctx_dma: "Converged IPC" context DMA address
556  * ring_state: Shared memory buffer containing ring head and tail indexes
557  * ring_state_dma: DMA address for ring_state
558  * {control,hci_acl,sco}_ack_ring: Completion rings used to acknowledge messages
559  * {hci_acl,sco}_event_ring: Completion rings used for device->host messages
560  * control_h2d_ring: Transfer ring used for control messages
561  * {hci,sco,acl}_h2d_ring: Transfer ring used to transfer HCI frames
562  * {hci,sco,acl}_d2h_ring: Transfer ring used to receive HCI frames in the
563  *                         corresponding completion ring
564  */
struct bcm4377_data {
	struct pci_dev *pdev;
	struct hci_dev *hdev;

	void __iomem *bar0;
	void __iomem *bar2;

	u32 bootstage;		/* last bootstage seen by the irq handler */
	u32 rti_status;		/* last RTI status seen by the irq handler */

	const struct bcm4377_hw *hw;

	const void *taurus_cal_blob;
	int taurus_cal_size;
	const void *taurus_beamforming_cal_blob;
	int taurus_beamforming_cal_size;

	char stepping[BCM4377_OTP_MAX_PARAM_LEN];
	char vendor[BCM4377_OTP_MAX_PARAM_LEN];
	const char *board_type;

	struct completion event;

	struct bcm4377_context *ctx;
	dma_addr_t ctx_dma;

	struct bcm4377_ring_state *ring_state;
	dma_addr_t ring_state_dma;

	/*
	 * The HCI and ACL rings have to be merged because this structure is
	 * hardcoded in the firmware.
	 */
	struct bcm4377_completion_ring control_ack_ring;
	struct bcm4377_completion_ring hci_acl_ack_ring;
	struct bcm4377_completion_ring hci_acl_event_ring;
	struct bcm4377_completion_ring sco_ack_ring;
	struct bcm4377_completion_ring sco_event_ring;

	struct bcm4377_transfer_ring control_h2d_ring;
	struct bcm4377_transfer_ring hci_h2d_ring;
	struct bcm4377_transfer_ring hci_d2h_ring;
	struct bcm4377_transfer_ring sco_h2d_ring;
	struct bcm4377_transfer_ring sco_d2h_ring;
	struct bcm4377_transfer_ring acl_h2d_ring;
	struct bcm4377_transfer_ring acl_d2h_ring;
};
612 
/*
 * Notify the device that a ring index has been updated.
 *
 * @bcm4377: driver state
 * @doorbell: doorbell index associated with the ring
 * @val: new head/tail index to publish to the device
 */
static void bcm4377_ring_doorbell(struct bcm4377_data *bcm4377, u8 doorbell,
				  u16 val)
{
	u32 db = FIELD_PREP(BCM4377_BAR0_DOORBELL_VALUE, val) |
		 FIELD_PREP(BCM4377_BAR0_DOORBELL_IDX, doorbell) |
		 BCM4377_BAR0_DOORBELL_RING;

	dev_dbg(&bcm4377->pdev->dev, "write %d to doorbell #%d (0x%x)\n", val,
		doorbell, db);
	iowrite32(db, bcm4377->bar0 + BCM4377_BAR0_DOORBELL);
}
626 
/*
 * Split a raw message id into generation and index and validate both.
 *
 * @bcm4377: driver state
 * @ring: transfer ring the message id belongs to
 * @raw_msgid: raw message id taken from a ring entry
 * @msgid: filled with the extracted message index on success
 *
 * Returns 0 on success or -EINVAL if the generation is stale or the index
 * is out of bounds for @ring.
 */
static int bcm4377_extract_msgid(struct bcm4377_data *bcm4377,
				 struct bcm4377_transfer_ring *ring,
				 u16 raw_msgid, u8 *msgid)
{
	u8 generation = FIELD_GET(BCM4377_MSGID_GENERATION, raw_msgid);
	*msgid = FIELD_GET(BCM4377_MSGID_ID, raw_msgid);

	/*
	 * The generation is incremented on every hci_open; a mismatch means
	 * this entry was queued before the last reset and must be dropped.
	 */
	if (generation != ring->generation) {
		dev_warn(
			&bcm4377->pdev->dev,
			"invalid message generation %d should be %d in entry for ring %d\n",
			generation, ring->generation, ring->ring_id);
		return -EINVAL;
	}

	if (*msgid >= ring->n_entries) {
		/* ">=" matches the check: msgid == n_entries is also invalid */
		dev_warn(&bcm4377->pdev->dev,
			 "invalid message id in entry for ring %d: %d >= %d\n",
			 ring->ring_id, *msgid, ring->n_entries);
		return -EINVAL;
	}

	return 0;
}
651 
/*
 * Handle a device->host message (HCI event, SCO or ACL data) delivered
 * through a completion ring and forward it to the HCI core.
 *
 * @bcm4377: driver state
 * @ring: transfer ring the message belongs to
 * @raw_msgid: raw message id (generation + index) from the completion entry
 * @entry_flags: flags of the completion ring entry
 * @type: HCI packet type (HCI_EVENT_PKT, HCI_SCODATA_PKT or HCI_ACLDATA_PKT)
 * @payload: in-place payload taken from the completion ring footer
 * @len: payload length in bytes
 */
static void bcm4377_handle_event(struct bcm4377_data *bcm4377,
				 struct bcm4377_transfer_ring *ring,
				 u16 raw_msgid, u8 entry_flags, u8 type,
				 void *payload, size_t len)
{
	struct sk_buff *skb;
	u16 head;
	u8 msgid;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	if (!ring->enabled) {
		dev_warn(&bcm4377->pdev->dev,
			 "event for disabled transfer ring %d\n",
			 ring->ring_id);
		goto out;
	}

	/*
	 * If the payload was DMA-mapped out-of-place it lives in the buffer
	 * belonging to the original transfer ring entry; locate it there
	 * instead of using the (in-place) completion ring footer.
	 */
	if (ring->d2h_buffers_only &&
	    entry_flags & BCM4377_XFER_RING_FLAG_PAYLOAD_MAPPED) {
		if (bcm4377_extract_msgid(bcm4377, ring, raw_msgid, &msgid))
			goto out;

		if (len > ring->mapped_payload_size) {
			dev_warn(
				&bcm4377->pdev->dev,
				"invalid payload len in event for ring %d: %zu > %zu\n",
				ring->ring_id, len, ring->mapped_payload_size);
			goto out;
		}

		payload = ring->payloads + msgid * ring->mapped_payload_size;
	}

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		goto out;

	memcpy(skb_put(skb, len), payload, len);
	hci_skb_pkt_type(skb) = type;
	hci_recv_frame(bcm4377->hdev, skb);

out:
	/*
	 * Advance the transfer ring head even when the message was dropped:
	 * the entry (and its payload buffer) is handed back to the device so
	 * it can be reused for the next device->host message.
	 */
	head = le16_to_cpu(bcm4377->ring_state->xfer_ring_head[ring->ring_id]);
	head = (head + 1) % ring->n_entries;
	bcm4377->ring_state->xfer_ring_head[ring->ring_id] = cpu_to_le16(head);

	bcm4377_ring_doorbell(bcm4377, ring->doorbell, head);

	spin_unlock_irqrestore(&ring->lock, flags);
}
703 
/*
 * Handle an acknowledgment for a previously sent host->device message:
 * wake up a waiting sender (if any) and release the message id.
 *
 * @bcm4377: driver state
 * @ring: transfer ring the acknowledged message was sent on
 * @raw_msgid: raw message id (generation + index) from the completion entry
 */
static void bcm4377_handle_ack(struct bcm4377_data *bcm4377,
			       struct bcm4377_transfer_ring *ring,
			       u16 raw_msgid)
{
	unsigned long irqflags;
	u8 msgid;

	spin_lock_irqsave(&ring->lock, irqflags);

	if (bcm4377_extract_msgid(bcm4377, ring, raw_msgid, &msgid))
		goto done;

	if (!test_bit(msgid, ring->msgids)) {
		dev_warn(
			&bcm4377->pdev->dev,
			"invalid message id in ack for ring %d: %d is not used\n",
			ring->ring_id, msgid);
		goto done;
	}

	/* wake up a sender blocked on this particular message, if any */
	if (ring->allow_wait) {
		struct completion *event = ring->events[msgid];

		if (event) {
			complete(event);
			ring->events[msgid] = NULL;
		}
	}

	/* the message id can now be reused for a new message */
	bitmap_release_region(ring->msgids, msgid, 0);

done:
	spin_unlock_irqrestore(&ring->lock, irqflags);
}
734 
/*
 * Dispatch a single completion ring entry: validate it, then either treat
 * it as an ack for a host->device transfer ring or as a device->host
 * message carrying an HCI frame.
 *
 * @bcm4377: driver state
 * @ring: completion ring the entry was found on
 * @pos: index of the entry inside @ring
 */
static void bcm4377_handle_completion(struct bcm4377_data *bcm4377,
				      struct bcm4377_completion_ring *ring,
				      u16 pos)
{
	struct bcm4377_completion_ring_entry *entry;
	u16 msg_id, transfer_ring;
	size_t entry_size, data_len;
	void *data;

	if (pos >= ring->n_entries) {
		dev_warn(&bcm4377->pdev->dev,
			 "invalid offset %d for completion ring %d\n", pos,
			 ring->ring_id);
		return;
	}

	/* each entry is followed by an optional in-place payload footer */
	entry_size = sizeof(*entry) + ring->payload_size;
	entry = ring->ring + pos * entry_size;
	data = ring->ring + pos * entry_size + sizeof(*entry);
	data_len = le32_to_cpu(entry->len);
	msg_id = le16_to_cpu(entry->msg_id);
	transfer_ring = le16_to_cpu(entry->ring_id);

	/* only accept entries for transfer rings bound to this ring */
	if ((ring->transfer_rings & BIT(transfer_ring)) == 0) {
		dev_warn(
			&bcm4377->pdev->dev,
			"invalid entry at offset %d for transfer ring %d in completion ring %d\n",
			pos, transfer_ring, ring->ring_id);
		return;
	}

	dev_dbg(&bcm4377->pdev->dev,
		"entry in completion ring %d for transfer ring %d with msg_id %d\n",
		ring->ring_id, transfer_ring, msg_id);

	switch (transfer_ring) {
	/* H2D rings: the entry acknowledges a message we sent earlier */
	case BCM4377_XFER_RING_CONTROL:
		bcm4377_handle_ack(bcm4377, &bcm4377->control_h2d_ring, msg_id);
		break;
	case BCM4377_XFER_RING_HCI_H2D:
		bcm4377_handle_ack(bcm4377, &bcm4377->hci_h2d_ring, msg_id);
		break;
	case BCM4377_XFER_RING_SCO_H2D:
		bcm4377_handle_ack(bcm4377, &bcm4377->sco_h2d_ring, msg_id);
		break;
	case BCM4377_XFER_RING_ACL_H2D:
		bcm4377_handle_ack(bcm4377, &bcm4377->acl_h2d_ring, msg_id);
		break;

	/* D2H rings: the entry carries an HCI frame from the device */
	case BCM4377_XFER_RING_HCI_D2H:
		bcm4377_handle_event(bcm4377, &bcm4377->hci_d2h_ring, msg_id,
				     entry->flags, HCI_EVENT_PKT, data,
				     data_len);
		break;
	case BCM4377_XFER_RING_SCO_D2H:
		bcm4377_handle_event(bcm4377, &bcm4377->sco_d2h_ring, msg_id,
				     entry->flags, HCI_SCODATA_PKT, data,
				     data_len);
		break;
	case BCM4377_XFER_RING_ACL_D2H:
		bcm4377_handle_event(bcm4377, &bcm4377->acl_d2h_ring, msg_id,
				     entry->flags, HCI_ACLDATA_PKT, data,
				     data_len);
		break;

	default:
		dev_warn(
			&bcm4377->pdev->dev,
			"entry in completion ring %d for unknown transfer ring %d with msg_id %d\n",
			ring->ring_id, transfer_ring, msg_id);
	}
}
807 
/*
 * Drain all pending entries from a completion ring: walk the tail towards
 * the head published by the device, handling each entry and advancing the
 * shared tail index so the device can reuse the slots.
 *
 * @bcm4377: driver state
 * @ring: completion ring to poll; no-op unless it has been created
 */
static void bcm4377_poll_completion_ring(struct bcm4377_data *bcm4377,
					 struct bcm4377_completion_ring *ring)
{
	u16 tail;
	__le16 *heads = bcm4377->ring_state->completion_ring_head;
	__le16 *tails = bcm4377->ring_state->completion_ring_tail;

	if (!ring->enabled)
		return;

	tail = le16_to_cpu(tails[ring->ring_id]);
	dev_dbg(&bcm4377->pdev->dev,
		"completion ring #%d: head: %d, tail: %d\n", ring->ring_id,
		le16_to_cpu(heads[ring->ring_id]), tail);

	/* the head lives in shared memory and is advanced by the device */
	while (tail != le16_to_cpu(READ_ONCE(heads[ring->ring_id]))) {
		/*
		 * ensure the CPU doesn't speculate through the comparison.
		 * otherwise it might already read the (empty) queue entry
		 * before the updated head has been loaded and checked.
		 */
		dma_rmb();

		bcm4377_handle_completion(bcm4377, ring, tail);

		/* publish the new tail so the device can reuse the slot */
		tail = (tail + 1) % ring->n_entries;
		tails[ring->ring_id] = cpu_to_le16(tail);
	}
}
837 
/*
 * Interrupt handler: track bootstage/RTI state transitions (used while
 * booting the firmware) and drain all completion rings.
 *
 * @irq: interrupt number (unused)
 * @data: struct bcm4377_data of the device that raised the interrupt
 */
static irqreturn_t bcm4377_irq(int irq, void *data)
{
	struct bcm4377_data *bcm4377 = data;
	u32 bootstage, rti_status;

	bootstage = ioread32(bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_BOOTSTAGE);
	rti_status = ioread32(bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_RTI_STATUS);

	/* wake up anyone waiting in the boot sequence on a state change */
	if (bootstage != bcm4377->bootstage ||
	    rti_status != bcm4377->rti_status) {
		dev_dbg(&bcm4377->pdev->dev,
			"bootstage = %d -> %d, rti state = %d -> %d\n",
			bcm4377->bootstage, bootstage, bcm4377->rti_status,
			rti_status);
		complete(&bcm4377->event);
		bcm4377->bootstage = bootstage;
		bcm4377->rti_status = rti_status;
	}

	if (rti_status > 2)
		dev_err(&bcm4377->pdev->dev, "RTI status is %d\n", rti_status);

	bcm4377_poll_completion_ring(bcm4377, &bcm4377->control_ack_ring);
	bcm4377_poll_completion_ring(bcm4377, &bcm4377->hci_acl_event_ring);
	bcm4377_poll_completion_ring(bcm4377, &bcm4377->hci_acl_ack_ring);
	bcm4377_poll_completion_ring(bcm4377, &bcm4377->sco_ack_ring);
	bcm4377_poll_completion_ring(bcm4377, &bcm4377->sco_event_ring);

	return IRQ_HANDLED;
}
868 
/*
 * Queue a single message on a host->device transfer ring.
 *
 * Payloads up to ring->payload_size are copied inline into the ring entry's
 * footer; larger payloads (up to ring->mapped_payload_size) are copied into
 * the per-message DMA buffer referenced from the entry instead. If @wait is
 * true (only allowed on rings with allow_wait set) the call sleeps until the
 * device acknowledges the message on the completion ring or BCM4377_TIMEOUT
 * expires.
 *
 * Returns 0 on success, -EINVAL on invalid arguments or a full ring, and
 * -ETIMEDOUT/-ERESTARTSYS style errors from the wait path.
 */
static int bcm4377_enqueue(struct bcm4377_data *bcm4377,
			   struct bcm4377_transfer_ring *ring, void *data,
			   size_t len, bool wait)
{
	unsigned long flags;
	struct bcm4377_xfer_ring_entry *entry;
	void *payload;
	size_t offset;
	u16 head, tail, new_head;
	u16 raw_msgid;
	int ret, msgid;
	DECLARE_COMPLETION_ONSTACK(event);

	/* reject payloads that fit neither the footer nor the mapped buffer */
	if (len > ring->payload_size && len > ring->mapped_payload_size) {
		dev_warn(
			&bcm4377->pdev->dev,
			"payload len %zu is too large for ring %d (max is %zu or %zu)\n",
			len, ring->ring_id, ring->payload_size,
			ring->mapped_payload_size);
		return -EINVAL;
	}
	/* waiting for an ack only works on rings prepared for it */
	if (wait && !ring->allow_wait)
		return -EINVAL;
	/* virtual rings have no host-side ring buffer to write into */
	if (ring->virtual)
		return -EINVAL;

	spin_lock_irqsave(&ring->lock, flags);

	head = le16_to_cpu(bcm4377->ring_state->xfer_ring_head[ring->ring_id]);
	tail = le16_to_cpu(bcm4377->ring_state->xfer_ring_tail[ring->ring_id]);

	new_head = (head + 1) % ring->n_entries;

	/* the ring is full when advancing head would collide with tail */
	if (new_head == tail) {
		dev_warn(&bcm4377->pdev->dev,
			 "can't send message because ring %d is full\n",
			 ring->ring_id);
		ret = -EINVAL;
		goto out;
	}

	/* grab a free message id; it doubles as the mapped-payload slot index */
	msgid = bitmap_find_free_region(ring->msgids, ring->n_entries, 0);
	if (msgid < 0) {
		dev_warn(&bcm4377->pdev->dev,
			 "can't find message id for ring %d\n", ring->ring_id);
		ret = -EINVAL;
		goto out;
	}

	/*
	 * the generation counter lets completions for entries from a previous
	 * incarnation of the ring be told apart from current ones
	 */
	raw_msgid = FIELD_PREP(BCM4377_MSGID_GENERATION, ring->generation);
	raw_msgid |= FIELD_PREP(BCM4377_MSGID_ID, msgid);

	offset = head * (sizeof(*entry) + ring->payload_size);
	entry = ring->ring + offset;

	memset(entry, 0, sizeof(*entry));
	entry->id = cpu_to_le16(raw_msgid);
	entry->len = cpu_to_le16(len);

	if (len <= ring->payload_size) {
		/* small payload: copy straight into the entry's footer */
		entry->flags = BCM4377_XFER_RING_FLAG_PAYLOAD_IN_FOOTER;
		payload = ring->ring + offset + sizeof(*entry);
	} else {
		/* large payload: use this msgid's slot in the DMA buffer */
		entry->flags = BCM4377_XFER_RING_FLAG_PAYLOAD_MAPPED;
		entry->payload = cpu_to_le64(ring->payloads_dma +
					     msgid * ring->mapped_payload_size);
		payload = ring->payloads + msgid * ring->mapped_payload_size;
	}

	memcpy(payload, data, len);

	/* register the on-stack completion so the irq path can wake us */
	if (wait)
		ring->events[msgid] = &event;

	/*
	 * The 4377 chips stop responding to any commands as soon as they
	 * have been idle for a while. Poking the sleep control register here
	 * makes them come alive again.
	 */
	iowrite32(BCM4377_BAR0_SLEEP_CONTROL_AWAKE,
		  bcm4377->bar0 + BCM4377_BAR0_SLEEP_CONTROL);

	dev_dbg(&bcm4377->pdev->dev,
		"updating head for transfer queue #%d to %d\n", ring->ring_id,
		new_head);
	bcm4377->ring_state->xfer_ring_head[ring->ring_id] =
		cpu_to_le16(new_head);

	/* sync rings are picked up by the device without a doorbell ring */
	if (!ring->sync)
		bcm4377_ring_doorbell(bcm4377, ring->doorbell, new_head);
	ret = 0;

out:
	spin_unlock_irqrestore(&ring->lock, flags);

	if (ret == 0 && wait) {
		ret = wait_for_completion_interruptible_timeout(
			&event, BCM4377_TIMEOUT);
		if (ret == 0)
			ret = -ETIMEDOUT;
		else if (ret > 0)
			ret = 0;

		/* always unregister the event, even on timeout/interrupt */
		spin_lock_irqsave(&ring->lock, flags);
		ring->events[msgid] = NULL;
		spin_unlock_irqrestore(&ring->lock, flags);
	}

	return ret;
}
979 
/*
 * Ask the device to set up a completion ring by sending a synchronous
 * control message. The host-side ring buffer is zeroed first so stale
 * entries from a previous incarnation cannot be misread.
 * Returns 0 on success (ring marked enabled) or a negative error code.
 */
static int bcm4377_create_completion_ring(struct bcm4377_data *bcm4377,
					  struct bcm4377_completion_ring *ring)
{
	struct bcm4377_create_completion_ring_msg msg;
	int ret;

	if (ring->enabled) {
		dev_warn(&bcm4377->pdev->dev,
			 "completion ring %d already enabled\n", ring->ring_id);
		return 0;
	}

	/* clear the whole ring: n_entries * (entry header + footer payload) */
	memset(ring->ring, 0,
	       ring->n_entries * (sizeof(struct bcm4377_completion_ring_entry) +
				  ring->payload_size));
	memset(&msg, 0, sizeof(msg));
	msg.msg_type = BCM4377_CONTROL_MSG_CREATE_COMPLETION_RING;
	msg.id = cpu_to_le16(ring->ring_id);
	msg.id_again = cpu_to_le16(ring->ring_id);
	msg.ring_iova = cpu_to_le64(ring->ring_dma);
	msg.n_elements = cpu_to_le16(ring->n_entries);
	/* 0xffffffff presumably disables byte-based interrupt moderation */
	msg.intmod_bytes = cpu_to_le32(0xffffffff);
	msg.unk = cpu_to_le32(0xffffffff);
	msg.intmod_delay = cpu_to_le16(ring->delay);
	/* footer size is expressed in 32bit words */
	msg.footer_size = ring->payload_size / 4;

	ret = bcm4377_enqueue(bcm4377, &bcm4377->control_h2d_ring, &msg,
			      sizeof(msg), true);
	if (!ret)
		ring->enabled = true;

	return ret;
}
1013 
bcm4377_destroy_completion_ring(struct bcm4377_data * bcm4377,struct bcm4377_completion_ring * ring)1014 static int bcm4377_destroy_completion_ring(struct bcm4377_data *bcm4377,
1015 					   struct bcm4377_completion_ring *ring)
1016 {
1017 	struct bcm4377_destroy_completion_ring_msg msg;
1018 	int ret;
1019 
1020 	memset(&msg, 0, sizeof(msg));
1021 	msg.msg_type = BCM4377_CONTROL_MSG_DESTROY_COMPLETION_RING;
1022 	msg.ring_id = cpu_to_le16(ring->ring_id);
1023 
1024 	ret = bcm4377_enqueue(bcm4377, &bcm4377->control_h2d_ring, &msg,
1025 			      sizeof(msg), true);
1026 	if (ret)
1027 		dev_warn(&bcm4377->pdev->dev,
1028 			 "failed to destroy completion ring %d\n",
1029 			 ring->ring_id);
1030 
1031 	ring->enabled = false;
1032 	return ret;
1033 }
1034 
/*
 * Ask the device to set up a transfer ring and (re)initialize its host-side
 * state. The create message itself must be sent with the ring lock dropped
 * because bcm4377_enqueue may sleep waiting for the ack; the lock is
 * re-taken afterwards to publish the pre-filled buffer entries and the
 * initial head.
 */
static int bcm4377_create_transfer_ring(struct bcm4377_data *bcm4377,
					struct bcm4377_transfer_ring *ring)
{
	struct bcm4377_create_transfer_ring_msg msg;
	u16 flags = 0;
	int ret, i;
	unsigned long spinlock_flags;

	if (ring->virtual)
		flags |= BCM4377_XFER_RING_FLAG_VIRTUAL;
	if (ring->sync)
		flags |= BCM4377_XFER_RING_FLAG_SYNC;

	spin_lock_irqsave(&ring->lock, spinlock_flags);
	memset(&msg, 0, sizeof(msg));
	msg.msg_type = BCM4377_CONTROL_MSG_CREATE_XFER_RING;
	msg.ring_id = cpu_to_le16(ring->ring_id);
	msg.ring_id_again = cpu_to_le16(ring->ring_id);
	msg.ring_iova = cpu_to_le64(ring->ring_dma);
	msg.n_elements = cpu_to_le16(ring->n_entries);
	msg.completion_ring_id = cpu_to_le16(ring->completion_ring);
	msg.doorbell = cpu_to_le16(ring->doorbell);
	msg.flags = cpu_to_le16(flags);
	/* footer size is expressed in 32bit words */
	msg.footer_size = ring->payload_size / 4;

	/* reset head/tail and bump the generation so stale msgids are ignored */
	bcm4377->ring_state->xfer_ring_head[ring->ring_id] = 0;
	bcm4377->ring_state->xfer_ring_tail[ring->ring_id] = 0;
	ring->generation++;
	spin_unlock_irqrestore(&ring->lock, spinlock_flags);

	ret = bcm4377_enqueue(bcm4377, &bcm4377->control_h2d_ring, &msg,
			      sizeof(msg), true);

	spin_lock_irqsave(&ring->lock, spinlock_flags);

	/*
	 * d2h-buffers-only rings are pre-populated with mapped, empty buffers
	 * for the device to fill with incoming data.
	 */
	if (ring->d2h_buffers_only) {
		for (i = 0; i < ring->n_entries; ++i) {
			struct bcm4377_xfer_ring_entry *entry =
				ring->ring + i * sizeof(*entry);
			u16 raw_msgid = FIELD_PREP(BCM4377_MSGID_GENERATION,
						   ring->generation);
			raw_msgid |= FIELD_PREP(BCM4377_MSGID_ID, i);

			memset(entry, 0, sizeof(*entry));
			entry->id = cpu_to_le16(raw_msgid);
			entry->len = cpu_to_le16(ring->mapped_payload_size);
			entry->flags = BCM4377_XFER_RING_FLAG_PAYLOAD_MAPPED;
			entry->payload =
				cpu_to_le64(ring->payloads_dma +
					    i * ring->mapped_payload_size);
		}
	}

	/*
	 * send some messages if this is a device->host ring to allow the device
	 * to reply by acknowledging them in the completion ring
	 */
	if (ring->virtual || ring->d2h_buffers_only) {
		/* 0xf initial entries; presumably a firmware convention */
		bcm4377->ring_state->xfer_ring_head[ring->ring_id] =
			cpu_to_le16(0xf);
		bcm4377_ring_doorbell(bcm4377, ring->doorbell, 0xf);
	}

	ring->enabled = true;
	spin_unlock_irqrestore(&ring->lock, spinlock_flags);

	return ret;
}
1103 
bcm4377_destroy_transfer_ring(struct bcm4377_data * bcm4377,struct bcm4377_transfer_ring * ring)1104 static int bcm4377_destroy_transfer_ring(struct bcm4377_data *bcm4377,
1105 					 struct bcm4377_transfer_ring *ring)
1106 {
1107 	struct bcm4377_destroy_transfer_ring_msg msg;
1108 	int ret;
1109 
1110 	memset(&msg, 0, sizeof(msg));
1111 	msg.msg_type = BCM4377_CONTROL_MSG_DESTROY_XFER_RING;
1112 	msg.ring_id = cpu_to_le16(ring->ring_id);
1113 
1114 	ret = bcm4377_enqueue(bcm4377, &bcm4377->control_h2d_ring, &msg,
1115 			      sizeof(msg), true);
1116 	if (ret)
1117 		dev_warn(&bcm4377->pdev->dev,
1118 			 "failed to destroy transfer ring %d\n", ring->ring_id);
1119 
1120 	ring->enabled = false;
1121 	return ret;
1122 }
1123 
/*
 * Upload one chunk of calibration data via the vendor-specific HCI command
 * 0xfd97. @blocks_left tells the firmware how many more chunks follow.
 * Returns 0 on success or a negative error from the HCI transaction.
 */
static int __bcm4378_send_calibration_chunk(struct bcm4377_data *bcm4377,
					    const void *data, size_t data_len,
					    u16 blocks_left)
{
	struct bcm4378_hci_send_calibration_cmd calib_cmd;
	struct sk_buff *reply;

	if (data_len > sizeof(calib_cmd.data))
		return -EINVAL;

	memset(&calib_cmd, 0, sizeof(calib_cmd));
	calib_cmd.unk = 0x03; /* constant the firmware expects; meaning unknown */
	calib_cmd.blocks_left = cpu_to_le16(blocks_left);
	memcpy(calib_cmd.data, data, data_len);

	reply = __hci_cmd_sync(bcm4377->hdev, 0xfd97, sizeof(calib_cmd),
			       &calib_cmd, HCI_INIT_TIMEOUT);
	if (IS_ERR(reply))
		return PTR_ERR(reply);

	kfree_skb(reply);
	return 0;
}
1147 
/*
 * Split a calibration blob into BCM4378_CALIBRATION_CHUNK_SIZE pieces and
 * upload them in order. Returns -ENOENT when no blob is available and
 * otherwise propagates the first chunk-upload error.
 */
static int __bcm4378_send_calibration(struct bcm4377_data *bcm4377,
				      const void *data, size_t data_size)
{
	size_t n_blocks, remaining, chunk_len, idx;
	int err;

	if (!data) {
		dev_err(&bcm4377->pdev->dev,
			"no calibration data available.\n");
		return -ENOENT;
	}

	n_blocks = DIV_ROUND_UP(data_size,
				(size_t)BCM4378_CALIBRATION_CHUNK_SIZE);
	remaining = data_size;

	for (idx = 0; idx < n_blocks; ++idx) {
		/* last chunk may be shorter than the fixed chunk size */
		chunk_len = min_t(size_t, remaining,
				  BCM4378_CALIBRATION_CHUNK_SIZE);

		err = __bcm4378_send_calibration_chunk(
			bcm4377, data + idx * BCM4378_CALIBRATION_CHUNK_SIZE,
			chunk_len, n_blocks - idx - 1);
		if (err) {
			dev_err(&bcm4377->pdev->dev,
				"send calibration chunk failed with %d\n", err);
			return err;
		}

		remaining -= chunk_len;
	}

	return 0;
}
1178 
bcm4378_send_calibration(struct bcm4377_data * bcm4377)1179 static int bcm4378_send_calibration(struct bcm4377_data *bcm4377)
1180 {
1181 	if ((strcmp(bcm4377->stepping, "b1") == 0) ||
1182 	    strcmp(bcm4377->stepping, "b3") == 0)
1183 		return __bcm4378_send_calibration(
1184 			bcm4377, bcm4377->taurus_beamforming_cal_blob,
1185 			bcm4377->taurus_beamforming_cal_size);
1186 	else
1187 		return __bcm4378_send_calibration(bcm4377,
1188 						  bcm4377->taurus_cal_blob,
1189 						  bcm4377->taurus_cal_size);
1190 }
1191 
bcm4387_send_calibration(struct bcm4377_data * bcm4377)1192 static int bcm4387_send_calibration(struct bcm4377_data *bcm4377)
1193 {
1194 	if (strcmp(bcm4377->stepping, "c2") == 0)
1195 		return __bcm4378_send_calibration(
1196 			bcm4377, bcm4377->taurus_beamforming_cal_blob,
1197 			bcm4377->taurus_beamforming_cal_size);
1198 	else
1199 		return __bcm4378_send_calibration(bcm4377,
1200 						  bcm4377->taurus_cal_blob,
1201 						  bcm4377->taurus_cal_size);
1202 }
1203 
bcm4388_send_calibration(struct bcm4377_data * bcm4377)1204 static int bcm4388_send_calibration(struct bcm4377_data *bcm4377)
1205 {
1206 	/* BCM4388 always uses beamforming */
1207 	return __bcm4378_send_calibration(
1208 		bcm4377, bcm4377->taurus_beamforming_cal_blob,
1209 		bcm4377->taurus_beamforming_cal_size);
1210 }
1211 
bcm4377_request_blob(struct bcm4377_data * bcm4377,const char * suffix)1212 static const struct firmware *bcm4377_request_blob(struct bcm4377_data *bcm4377,
1213 						   const char *suffix)
1214 {
1215 	const struct firmware *fw;
1216 	char name0[64], name1[64];
1217 	int ret;
1218 
1219 	snprintf(name0, sizeof(name0), "brcm/brcmbt%04x%s-%s-%s.%s",
1220 		 bcm4377->hw->id, bcm4377->stepping, bcm4377->board_type,
1221 		 bcm4377->vendor, suffix);
1222 	snprintf(name1, sizeof(name1), "brcm/brcmbt%04x%s-%s.%s",
1223 		 bcm4377->hw->id, bcm4377->stepping, bcm4377->board_type,
1224 		 suffix);
1225 	dev_dbg(&bcm4377->pdev->dev, "Trying to load firmware: '%s' or '%s'\n",
1226 		name0, name1);
1227 
1228 	ret = firmware_request_nowarn(&fw, name0, &bcm4377->pdev->dev);
1229 	if (!ret)
1230 		return fw;
1231 	ret = firmware_request_nowarn(&fw, name1, &bcm4377->pdev->dev);
1232 	if (!ret)
1233 		return fw;
1234 
1235 	dev_err(&bcm4377->pdev->dev,
1236 		"Unable to load firmware; tried '%s' and '%s'\n", name0, name1);
1237 	return NULL;
1238 }
1239 
bcm4377_send_ptb(struct bcm4377_data * bcm4377,const struct firmware * fw)1240 static int bcm4377_send_ptb(struct bcm4377_data *bcm4377,
1241 			    const struct firmware *fw)
1242 {
1243 	struct sk_buff *skb;
1244 
1245 	skb = __hci_cmd_sync(bcm4377->hdev, 0xfd98, fw->size, fw->data,
1246 			     HCI_INIT_TIMEOUT);
1247 	/*
1248 	 * This command seems to always fail on more recent firmware versions
1249 	 * (even in traces taken from the macOS driver). It's unclear why this
1250 	 * happens but because the PTB file contains calibration and/or
1251 	 * regulatory data and may be required on older firmware we still try to
1252 	 * send it here just in case and just ignore if it fails.
1253 	 */
1254 	if (!IS_ERR(skb))
1255 		kfree_skb(skb);
1256 	return 0;
1257 }
1258 
/*
 * Upload one chunk of the PTB blob via the vendor-specific HCI command
 * 0xfe0d. @blocks_left tells the firmware how many more chunks follow.
 * Returns 0 on success or a negative error from the HCI transaction.
 */
static int bcm4378_send_ptb_chunk(struct bcm4377_data *bcm4377,
				  const void *data, size_t data_len,
				  u16 blocks_left)
{
	struct bcm4378_hci_send_ptb_cmd ptb_cmd;
	struct sk_buff *reply;

	if (data_len > BCM4378_PTB_CHUNK_SIZE)
		return -EINVAL;

	memset(&ptb_cmd, 0, sizeof(ptb_cmd));
	ptb_cmd.blocks_left = cpu_to_le16(blocks_left);
	memcpy(ptb_cmd.data, data, data_len);

	reply = __hci_cmd_sync(bcm4377->hdev, 0xfe0d, sizeof(ptb_cmd),
			       &ptb_cmd, HCI_INIT_TIMEOUT);
	if (IS_ERR(reply))
		return PTR_ERR(reply);

	kfree_skb(reply);
	return 0;
}
1281 
bcm4378_send_ptb(struct bcm4377_data * bcm4377,const struct firmware * fw)1282 static int bcm4378_send_ptb(struct bcm4377_data *bcm4377,
1283 			    const struct firmware *fw)
1284 {
1285 	size_t chunks = DIV_ROUND_UP(fw->size, (size_t)BCM4378_PTB_CHUNK_SIZE);
1286 	size_t i, left, transfer_len;
1287 	int ret;
1288 
1289 	for (i = 0, left = fw->size; i < chunks; ++i, left -= transfer_len) {
1290 		transfer_len = min_t(size_t, left, BCM4378_PTB_CHUNK_SIZE);
1291 
1292 		dev_dbg(&bcm4377->pdev->dev, "sending ptb chunk %zu/%zu\n",
1293 			i + 1, chunks);
1294 		ret = bcm4378_send_ptb_chunk(
1295 			bcm4377, fw->data + i * BCM4378_PTB_CHUNK_SIZE,
1296 			transfer_len, chunks - i - 1);
1297 		if (ret) {
1298 			dev_err(&bcm4377->pdev->dev,
1299 				"sending ptb chunk %zu failed (%d)", i, ret);
1300 			return ret;
1301 		}
1302 	}
1303 
1304 	return 0;
1305 }
1306 
bcm4377_hci_open(struct hci_dev * hdev)1307 static int bcm4377_hci_open(struct hci_dev *hdev)
1308 {
1309 	struct bcm4377_data *bcm4377 = hci_get_drvdata(hdev);
1310 	int ret;
1311 
1312 	dev_dbg(&bcm4377->pdev->dev, "creating rings\n");
1313 
1314 	ret = bcm4377_create_completion_ring(bcm4377,
1315 					     &bcm4377->hci_acl_ack_ring);
1316 	if (ret)
1317 		return ret;
1318 	ret = bcm4377_create_completion_ring(bcm4377,
1319 					     &bcm4377->hci_acl_event_ring);
1320 	if (ret)
1321 		goto destroy_hci_acl_ack;
1322 	ret = bcm4377_create_completion_ring(bcm4377, &bcm4377->sco_ack_ring);
1323 	if (ret)
1324 		goto destroy_hci_acl_event;
1325 	ret = bcm4377_create_completion_ring(bcm4377, &bcm4377->sco_event_ring);
1326 	if (ret)
1327 		goto destroy_sco_ack;
1328 	dev_dbg(&bcm4377->pdev->dev,
1329 		"all completion rings successfully created!\n");
1330 
1331 	ret = bcm4377_create_transfer_ring(bcm4377, &bcm4377->hci_h2d_ring);
1332 	if (ret)
1333 		goto destroy_sco_event;
1334 	ret = bcm4377_create_transfer_ring(bcm4377, &bcm4377->hci_d2h_ring);
1335 	if (ret)
1336 		goto destroy_hci_h2d;
1337 	ret = bcm4377_create_transfer_ring(bcm4377, &bcm4377->sco_h2d_ring);
1338 	if (ret)
1339 		goto destroy_hci_d2h;
1340 	ret = bcm4377_create_transfer_ring(bcm4377, &bcm4377->sco_d2h_ring);
1341 	if (ret)
1342 		goto destroy_sco_h2d;
1343 	ret = bcm4377_create_transfer_ring(bcm4377, &bcm4377->acl_h2d_ring);
1344 	if (ret)
1345 		goto destroy_sco_d2h;
1346 	ret = bcm4377_create_transfer_ring(bcm4377, &bcm4377->acl_d2h_ring);
1347 	if (ret)
1348 		goto destroy_acl_h2d;
1349 	dev_dbg(&bcm4377->pdev->dev,
1350 		"all transfer rings successfully created!\n");
1351 
1352 	return 0;
1353 
1354 destroy_acl_h2d:
1355 	bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->acl_h2d_ring);
1356 destroy_sco_d2h:
1357 	bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->sco_d2h_ring);
1358 destroy_sco_h2d:
1359 	bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->sco_h2d_ring);
1360 destroy_hci_d2h:
1361 	bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->hci_h2d_ring);
1362 destroy_hci_h2d:
1363 	bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->hci_d2h_ring);
1364 destroy_sco_event:
1365 	bcm4377_destroy_completion_ring(bcm4377, &bcm4377->sco_event_ring);
1366 destroy_sco_ack:
1367 	bcm4377_destroy_completion_ring(bcm4377, &bcm4377->sco_ack_ring);
1368 destroy_hci_acl_event:
1369 	bcm4377_destroy_completion_ring(bcm4377, &bcm4377->hci_acl_event_ring);
1370 destroy_hci_acl_ack:
1371 	bcm4377_destroy_completion_ring(bcm4377, &bcm4377->hci_acl_ack_ring);
1372 
1373 	dev_err(&bcm4377->pdev->dev, "Creating rings failed with %d\n", ret);
1374 	return ret;
1375 }
1376 
bcm4377_hci_close(struct hci_dev * hdev)1377 static int bcm4377_hci_close(struct hci_dev *hdev)
1378 {
1379 	struct bcm4377_data *bcm4377 = hci_get_drvdata(hdev);
1380 
1381 	dev_dbg(&bcm4377->pdev->dev, "destroying rings in hci_close\n");
1382 
1383 	bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->acl_d2h_ring);
1384 	bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->acl_h2d_ring);
1385 	bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->sco_d2h_ring);
1386 	bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->sco_h2d_ring);
1387 	bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->hci_d2h_ring);
1388 	bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->hci_h2d_ring);
1389 
1390 	bcm4377_destroy_completion_ring(bcm4377, &bcm4377->sco_event_ring);
1391 	bcm4377_destroy_completion_ring(bcm4377, &bcm4377->sco_ack_ring);
1392 	bcm4377_destroy_completion_ring(bcm4377, &bcm4377->hci_acl_event_ring);
1393 	bcm4377_destroy_completion_ring(bcm4377, &bcm4377->hci_acl_ack_ring);
1394 
1395 	return 0;
1396 }
1397 
bcm4377_is_valid_bdaddr(struct bcm4377_data * bcm4377,bdaddr_t * addr)1398 static bool bcm4377_is_valid_bdaddr(struct bcm4377_data *bcm4377,
1399 				    bdaddr_t *addr)
1400 {
1401 	if (addr->b[0] != 0x93)
1402 		return true;
1403 	if (addr->b[1] != 0x76)
1404 		return true;
1405 	if (addr->b[2] != 0x00)
1406 		return true;
1407 	if (addr->b[4] != (bcm4377->hw->id & 0xff))
1408 		return true;
1409 	if (addr->b[5] != (bcm4377->hw->id >> 8))
1410 		return true;
1411 	return false;
1412 }
1413 
bcm4377_check_bdaddr(struct bcm4377_data * bcm4377)1414 static int bcm4377_check_bdaddr(struct bcm4377_data *bcm4377)
1415 {
1416 	struct hci_rp_read_bd_addr *bda;
1417 	struct sk_buff *skb;
1418 
1419 	skb = __hci_cmd_sync(bcm4377->hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
1420 			     HCI_INIT_TIMEOUT);
1421 	if (IS_ERR(skb)) {
1422 		int err = PTR_ERR(skb);
1423 
1424 		dev_err(&bcm4377->pdev->dev, "HCI_OP_READ_BD_ADDR failed (%d)",
1425 			err);
1426 		return err;
1427 	}
1428 
1429 	if (skb->len != sizeof(*bda)) {
1430 		dev_err(&bcm4377->pdev->dev,
1431 			"HCI_OP_READ_BD_ADDR reply length invalid");
1432 		kfree_skb(skb);
1433 		return -EIO;
1434 	}
1435 
1436 	bda = (struct hci_rp_read_bd_addr *)skb->data;
1437 	if (!bcm4377_is_valid_bdaddr(bcm4377, &bda->bdaddr))
1438 		set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &bcm4377->hdev->quirks);
1439 
1440 	kfree_skb(skb);
1441 	return 0;
1442 }
1443 
bcm4377_hci_setup(struct hci_dev * hdev)1444 static int bcm4377_hci_setup(struct hci_dev *hdev)
1445 {
1446 	struct bcm4377_data *bcm4377 = hci_get_drvdata(hdev);
1447 	const struct firmware *fw;
1448 	int ret;
1449 
1450 	if (bcm4377->hw->send_calibration) {
1451 		ret = bcm4377->hw->send_calibration(bcm4377);
1452 		if (ret)
1453 			return ret;
1454 	}
1455 
1456 	fw = bcm4377_request_blob(bcm4377, "ptb");
1457 	if (!fw) {
1458 		dev_err(&bcm4377->pdev->dev, "failed to load PTB data");
1459 		return -ENOENT;
1460 	}
1461 
1462 	ret = bcm4377->hw->send_ptb(bcm4377, fw);
1463 	release_firmware(fw);
1464 	if (ret)
1465 		return ret;
1466 
1467 	return bcm4377_check_bdaddr(bcm4377);
1468 }
1469 
bcm4377_hci_send_frame(struct hci_dev * hdev,struct sk_buff * skb)1470 static int bcm4377_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
1471 {
1472 	struct bcm4377_data *bcm4377 = hci_get_drvdata(hdev);
1473 	struct bcm4377_transfer_ring *ring;
1474 	int ret;
1475 
1476 	switch (hci_skb_pkt_type(skb)) {
1477 	case HCI_COMMAND_PKT:
1478 		hdev->stat.cmd_tx++;
1479 		ring = &bcm4377->hci_h2d_ring;
1480 		break;
1481 
1482 	case HCI_ACLDATA_PKT:
1483 		hdev->stat.acl_tx++;
1484 		ring = &bcm4377->acl_h2d_ring;
1485 		break;
1486 
1487 	case HCI_SCODATA_PKT:
1488 		hdev->stat.sco_tx++;
1489 		ring = &bcm4377->sco_h2d_ring;
1490 		break;
1491 
1492 	default:
1493 		return -EILSEQ;
1494 	}
1495 
1496 	ret = bcm4377_enqueue(bcm4377, ring, skb->data, skb->len, false);
1497 	if (ret < 0) {
1498 		hdev->stat.err_tx++;
1499 		return ret;
1500 	}
1501 
1502 	hdev->stat.byte_tx += skb->len;
1503 	kfree_skb(skb);
1504 	return ret;
1505 }
1506 
bcm4377_hci_set_bdaddr(struct hci_dev * hdev,const bdaddr_t * bdaddr)1507 static int bcm4377_hci_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
1508 {
1509 	struct bcm4377_data *bcm4377 = hci_get_drvdata(hdev);
1510 	struct sk_buff *skb;
1511 	int err;
1512 
1513 	skb = __hci_cmd_sync(hdev, 0xfc01, 6, bdaddr, HCI_INIT_TIMEOUT);
1514 	if (IS_ERR(skb)) {
1515 		err = PTR_ERR(skb);
1516 		dev_err(&bcm4377->pdev->dev,
1517 			"Change address command failed (%d)", err);
1518 		return err;
1519 	}
1520 	kfree_skb(skb);
1521 
1522 	return 0;
1523 }
1524 
/*
 * Validate a transfer ring's configuration and allocate its devres-managed
 * DMA buffers: the ring itself (entry header + inline footer per slot), the
 * completion-event pointer array for allow_wait rings and the mapped payload
 * buffer for rings with out-of-line payloads. Virtual rings have no host
 * memory at all. Returns 0 or a negative error; no explicit cleanup is
 * needed since everything is devres-managed.
 */
static int bcm4377_alloc_transfer_ring(struct bcm4377_data *bcm4377,
				       struct bcm4377_transfer_ring *ring)
{
	size_t entry_size;

	spin_lock_init(&ring->lock);
	/* the device expects both payload sizes in 32bit-word granularity */
	ring->payload_size = ALIGN(ring->payload_size, 4);
	ring->mapped_payload_size = ALIGN(ring->mapped_payload_size, 4);

	if (ring->payload_size > BCM4377_XFER_RING_MAX_INPLACE_PAYLOAD_SIZE)
		return -EINVAL;
	if (ring->n_entries > BCM4377_MAX_RING_SIZE)
		return -EINVAL;
	/* waiting relies on host-side entries, which virtual rings lack */
	if (ring->virtual && ring->allow_wait)
		return -EINVAL;

	/* d2h-buffers-only rings must use mapped payloads exclusively */
	if (ring->d2h_buffers_only) {
		if (ring->virtual)
			return -EINVAL;
		if (ring->payload_size)
			return -EINVAL;
		if (!ring->mapped_payload_size)
			return -EINVAL;
	}
	/* virtual rings need no host-side allocations at all */
	if (ring->virtual)
		return 0;

	entry_size =
		ring->payload_size + sizeof(struct bcm4377_xfer_ring_entry);
	ring->ring = dmam_alloc_coherent(&bcm4377->pdev->dev,
					 ring->n_entries * entry_size,
					 &ring->ring_dma, GFP_KERNEL);
	if (!ring->ring)
		return -ENOMEM;

	if (ring->allow_wait) {
		/* one completion pointer slot per possible in-flight msgid */
		ring->events = devm_kcalloc(&bcm4377->pdev->dev,
					    ring->n_entries,
					    sizeof(*ring->events), GFP_KERNEL);
		if (!ring->events)
			return -ENOMEM;
	}

	if (ring->mapped_payload_size) {
		/* one fixed-size payload slot per msgid */
		ring->payloads = dmam_alloc_coherent(
			&bcm4377->pdev->dev,
			ring->n_entries * ring->mapped_payload_size,
			&ring->payloads_dma, GFP_KERNEL);
		if (!ring->payloads)
			return -ENOMEM;
	}

	return 0;
}
1579 
bcm4377_alloc_completion_ring(struct bcm4377_data * bcm4377,struct bcm4377_completion_ring * ring)1580 static int bcm4377_alloc_completion_ring(struct bcm4377_data *bcm4377,
1581 					 struct bcm4377_completion_ring *ring)
1582 {
1583 	size_t entry_size;
1584 
1585 	ring->payload_size = ALIGN(ring->payload_size, 4);
1586 	if (ring->payload_size > BCM4377_XFER_RING_MAX_INPLACE_PAYLOAD_SIZE)
1587 		return -EINVAL;
1588 	if (ring->n_entries > BCM4377_MAX_RING_SIZE)
1589 		return -EINVAL;
1590 
1591 	entry_size = ring->payload_size +
1592 		     sizeof(struct bcm4377_completion_ring_entry);
1593 
1594 	ring->ring = dmam_alloc_coherent(&bcm4377->pdev->dev,
1595 					 ring->n_entries * entry_size,
1596 					 &ring->ring_dma, GFP_KERNEL);
1597 	if (!ring->ring)
1598 		return -ENOMEM;
1599 	return 0;
1600 }
1601 
/*
 * Allocate and fill the shared context structure that is handed to the
 * device at boot. It points the firmware at the ring head/tail state, the
 * control transfer/completion rings and a small scratch buffer. All memory
 * is devres-managed coherent DMA, so no explicit cleanup is required.
 */
static int bcm4377_init_context(struct bcm4377_data *bcm4377)
{
	struct device *dev = &bcm4377->pdev->dev;
	dma_addr_t peripheral_info_dma;

	bcm4377->ctx = dmam_alloc_coherent(dev, sizeof(*bcm4377->ctx),
					   &bcm4377->ctx_dma, GFP_KERNEL);
	if (!bcm4377->ctx)
		return -ENOMEM;
	memset(bcm4377->ctx, 0, sizeof(*bcm4377->ctx));

	/* head/tail indices shared between host and device for all rings */
	bcm4377->ring_state =
		dmam_alloc_coherent(dev, sizeof(*bcm4377->ring_state),
				    &bcm4377->ring_state_dma, GFP_KERNEL);
	if (!bcm4377->ring_state)
		return -ENOMEM;
	memset(bcm4377->ring_state, 0, sizeof(*bcm4377->ring_state));

	bcm4377->ctx->version = cpu_to_le16(1);
	bcm4377->ctx->size = cpu_to_le16(sizeof(*bcm4377->ctx));
	/* capability bit 1; exact meaning unknown but required by firmware */
	bcm4377->ctx->enabled_caps = cpu_to_le32(2);

	/*
	 * The BT device will write 0x20 bytes of data to this buffer but
	 * the exact contents are unknown. It only needs to exist for BT
	 * to work such that we can just allocate and then ignore it.
	 */
	if (!dmam_alloc_coherent(&bcm4377->pdev->dev, 0x20,
				 &peripheral_info_dma, GFP_KERNEL))
		return -ENOMEM;
	bcm4377->ctx->peripheral_info_addr = cpu_to_le64(peripheral_info_dma);

	/* IOVAs of the four index arrays inside the shared ring state */
	bcm4377->ctx->xfer_ring_heads_addr = cpu_to_le64(
		bcm4377->ring_state_dma +
		offsetof(struct bcm4377_ring_state, xfer_ring_head));
	bcm4377->ctx->xfer_ring_tails_addr = cpu_to_le64(
		bcm4377->ring_state_dma +
		offsetof(struct bcm4377_ring_state, xfer_ring_tail));
	bcm4377->ctx->completion_ring_heads_addr = cpu_to_le64(
		bcm4377->ring_state_dma +
		offsetof(struct bcm4377_ring_state, completion_ring_head));
	bcm4377->ctx->completion_ring_tails_addr = cpu_to_le64(
		bcm4377->ring_state_dma +
		offsetof(struct bcm4377_ring_state, completion_ring_tail));

	bcm4377->ctx->n_completion_rings =
		cpu_to_le16(BCM4377_N_COMPLETION_RINGS);
	bcm4377->ctx->n_xfer_rings = cpu_to_le16(BCM4377_N_TRANSFER_RINGS);

	/* the control completion ring is polled only; no doorbell, no MSI */
	bcm4377->ctx->control_completion_ring_addr =
		cpu_to_le64(bcm4377->control_ack_ring.ring_dma);
	bcm4377->ctx->control_completion_ring_n_entries =
		cpu_to_le16(bcm4377->control_ack_ring.n_entries);
	bcm4377->ctx->control_completion_ring_doorbell = cpu_to_le16(0xffff);
	bcm4377->ctx->control_completion_ring_msi = 0;
	bcm4377->ctx->control_completion_ring_header_size = 0;
	bcm4377->ctx->control_completion_ring_footer_size = 0;

	bcm4377->ctx->control_xfer_ring_addr =
		cpu_to_le64(bcm4377->control_h2d_ring.ring_dma);
	bcm4377->ctx->control_xfer_ring_n_entries =
		cpu_to_le16(bcm4377->control_h2d_ring.n_entries);
	bcm4377->ctx->control_xfer_ring_doorbell =
		cpu_to_le16(bcm4377->control_h2d_ring.doorbell);
	bcm4377->ctx->control_xfer_ring_msi = 0;
	bcm4377->ctx->control_xfer_ring_header_size = 0;
	/* footer size is expressed in 32bit words */
	bcm4377->ctx->control_xfer_ring_footer_size =
		bcm4377->control_h2d_ring.payload_size / 4;

	dev_dbg(&bcm4377->pdev->dev, "context initialized at IOVA %pad",
		&bcm4377->ctx_dma);

	return 0;
}
1676 
bcm4377_prepare_rings(struct bcm4377_data * bcm4377)1677 static int bcm4377_prepare_rings(struct bcm4377_data *bcm4377)
1678 {
1679 	int ret;
1680 
1681 	/*
1682 	 * Even though many of these settings appear to be configurable
1683 	 * when sending the "create ring" messages most of these are
1684 	 * actually hardcoded in some (and quite possibly all) firmware versions
1685 	 * and changing them on the host has no effect.
1686 	 * Specifically, this applies to at least the doorbells, the transfer
1687 	 * and completion ring ids and their mapping (e.g. both HCI and ACL
1688 	 * entries will always be queued in completion rings 1 and 2 no matter
1689 	 * what we configure here).
1690 	 */
1691 	bcm4377->control_ack_ring.ring_id = BCM4377_ACK_RING_CONTROL;
1692 	bcm4377->control_ack_ring.n_entries = 32;
1693 	bcm4377->control_ack_ring.transfer_rings =
1694 		BIT(BCM4377_XFER_RING_CONTROL);
1695 
1696 	bcm4377->hci_acl_ack_ring.ring_id = BCM4377_ACK_RING_HCI_ACL;
1697 	bcm4377->hci_acl_ack_ring.n_entries = 2 * BCM4377_RING_N_ENTRIES;
1698 	bcm4377->hci_acl_ack_ring.transfer_rings =
1699 		BIT(BCM4377_XFER_RING_HCI_H2D) | BIT(BCM4377_XFER_RING_ACL_H2D);
1700 	bcm4377->hci_acl_ack_ring.delay = 1000;
1701 
1702 	/*
1703 	 * A payload size of MAX_EVENT_PAYLOAD_SIZE is enough here since large
1704 	 * ACL packets will be transmitted inside buffers mapped via
1705 	 * acl_d2h_ring anyway.
1706 	 */
1707 	bcm4377->hci_acl_event_ring.ring_id = BCM4377_EVENT_RING_HCI_ACL;
1708 	bcm4377->hci_acl_event_ring.payload_size = MAX_EVENT_PAYLOAD_SIZE;
1709 	bcm4377->hci_acl_event_ring.n_entries = 2 * BCM4377_RING_N_ENTRIES;
1710 	bcm4377->hci_acl_event_ring.transfer_rings =
1711 		BIT(BCM4377_XFER_RING_HCI_D2H) | BIT(BCM4377_XFER_RING_ACL_D2H);
1712 	bcm4377->hci_acl_event_ring.delay = 1000;
1713 
1714 	bcm4377->sco_ack_ring.ring_id = BCM4377_ACK_RING_SCO;
1715 	bcm4377->sco_ack_ring.n_entries = BCM4377_RING_N_ENTRIES;
1716 	bcm4377->sco_ack_ring.transfer_rings = BIT(BCM4377_XFER_RING_SCO_H2D);
1717 
1718 	bcm4377->sco_event_ring.ring_id = BCM4377_EVENT_RING_SCO;
1719 	bcm4377->sco_event_ring.payload_size = MAX_SCO_PAYLOAD_SIZE;
1720 	bcm4377->sco_event_ring.n_entries = BCM4377_RING_N_ENTRIES;
1721 	bcm4377->sco_event_ring.transfer_rings = BIT(BCM4377_XFER_RING_SCO_D2H);
1722 
1723 	bcm4377->control_h2d_ring.ring_id = BCM4377_XFER_RING_CONTROL;
1724 	bcm4377->control_h2d_ring.doorbell = BCM4377_DOORBELL_CONTROL;
1725 	bcm4377->control_h2d_ring.payload_size = BCM4377_CONTROL_MSG_SIZE;
1726 	bcm4377->control_h2d_ring.completion_ring = BCM4377_ACK_RING_CONTROL;
1727 	bcm4377->control_h2d_ring.allow_wait = true;
1728 	bcm4377->control_h2d_ring.n_entries = BCM4377_RING_N_ENTRIES;
1729 
1730 	bcm4377->hci_h2d_ring.ring_id = BCM4377_XFER_RING_HCI_H2D;
1731 	bcm4377->hci_h2d_ring.doorbell = BCM4377_DOORBELL_HCI_H2D;
1732 	bcm4377->hci_h2d_ring.payload_size = MAX_EVENT_PAYLOAD_SIZE;
1733 	bcm4377->hci_h2d_ring.completion_ring = BCM4377_ACK_RING_HCI_ACL;
1734 	bcm4377->hci_h2d_ring.n_entries = BCM4377_RING_N_ENTRIES;
1735 
1736 	bcm4377->hci_d2h_ring.ring_id = BCM4377_XFER_RING_HCI_D2H;
1737 	bcm4377->hci_d2h_ring.doorbell = BCM4377_DOORBELL_HCI_D2H;
1738 	bcm4377->hci_d2h_ring.completion_ring = BCM4377_EVENT_RING_HCI_ACL;
1739 	bcm4377->hci_d2h_ring.virtual = true;
1740 	bcm4377->hci_d2h_ring.n_entries = BCM4377_RING_N_ENTRIES;
1741 
1742 	bcm4377->sco_h2d_ring.ring_id = BCM4377_XFER_RING_SCO_H2D;
1743 	bcm4377->sco_h2d_ring.doorbell = BCM4377_DOORBELL_SCO;
1744 	bcm4377->sco_h2d_ring.payload_size = MAX_SCO_PAYLOAD_SIZE;
1745 	bcm4377->sco_h2d_ring.completion_ring = BCM4377_ACK_RING_SCO;
1746 	bcm4377->sco_h2d_ring.sync = true;
1747 	bcm4377->sco_h2d_ring.n_entries = BCM4377_RING_N_ENTRIES;
1748 
1749 	bcm4377->sco_d2h_ring.ring_id = BCM4377_XFER_RING_SCO_D2H;
1750 	bcm4377->sco_d2h_ring.doorbell = BCM4377_DOORBELL_SCO;
1751 	bcm4377->sco_d2h_ring.completion_ring = BCM4377_EVENT_RING_SCO;
1752 	bcm4377->sco_d2h_ring.virtual = true;
1753 	bcm4377->sco_d2h_ring.sync = true;
1754 	bcm4377->sco_d2h_ring.n_entries = BCM4377_RING_N_ENTRIES;
1755 
1756 	/*
1757 	 * This ring has to use mapped_payload_size because the largest ACL
1758 	 * packet doesn't fit inside the largest possible footer
1759 	 */
1760 	bcm4377->acl_h2d_ring.ring_id = BCM4377_XFER_RING_ACL_H2D;
1761 	bcm4377->acl_h2d_ring.doorbell = BCM4377_DOORBELL_ACL_H2D;
1762 	bcm4377->acl_h2d_ring.mapped_payload_size = MAX_ACL_PAYLOAD_SIZE;
1763 	bcm4377->acl_h2d_ring.completion_ring = BCM4377_ACK_RING_HCI_ACL;
1764 	bcm4377->acl_h2d_ring.n_entries = BCM4377_RING_N_ENTRIES;
1765 
1766 	/*
1767 	 * This ring only contains empty buffers to be used by incoming
1768 	 * ACL packets that do not fit inside the footer of hci_acl_event_ring
1769 	 */
1770 	bcm4377->acl_d2h_ring.ring_id = BCM4377_XFER_RING_ACL_D2H;
1771 	bcm4377->acl_d2h_ring.doorbell = BCM4377_DOORBELL_ACL_D2H;
1772 	bcm4377->acl_d2h_ring.completion_ring = BCM4377_EVENT_RING_HCI_ACL;
1773 	bcm4377->acl_d2h_ring.d2h_buffers_only = true;
1774 	bcm4377->acl_d2h_ring.mapped_payload_size = MAX_ACL_PAYLOAD_SIZE;
1775 	bcm4377->acl_d2h_ring.n_entries = BCM4377_RING_N_ENTRIES;
1776 
1777 	/*
1778 	 * no need for any cleanup since this is only called from _probe
1779 	 * and only devres-managed allocations are used
1780 	 */
1781 	ret = bcm4377_alloc_transfer_ring(bcm4377, &bcm4377->control_h2d_ring);
1782 	if (ret)
1783 		return ret;
1784 	ret = bcm4377_alloc_transfer_ring(bcm4377, &bcm4377->hci_h2d_ring);
1785 	if (ret)
1786 		return ret;
1787 	ret = bcm4377_alloc_transfer_ring(bcm4377, &bcm4377->hci_d2h_ring);
1788 	if (ret)
1789 		return ret;
1790 	ret = bcm4377_alloc_transfer_ring(bcm4377, &bcm4377->sco_h2d_ring);
1791 	if (ret)
1792 		return ret;
1793 	ret = bcm4377_alloc_transfer_ring(bcm4377, &bcm4377->sco_d2h_ring);
1794 	if (ret)
1795 		return ret;
1796 	ret = bcm4377_alloc_transfer_ring(bcm4377, &bcm4377->acl_h2d_ring);
1797 	if (ret)
1798 		return ret;
1799 	ret = bcm4377_alloc_transfer_ring(bcm4377, &bcm4377->acl_d2h_ring);
1800 	if (ret)
1801 		return ret;
1802 
1803 	ret = bcm4377_alloc_completion_ring(bcm4377,
1804 					    &bcm4377->control_ack_ring);
1805 	if (ret)
1806 		return ret;
1807 	ret = bcm4377_alloc_completion_ring(bcm4377,
1808 					    &bcm4377->hci_acl_ack_ring);
1809 	if (ret)
1810 		return ret;
1811 	ret = bcm4377_alloc_completion_ring(bcm4377,
1812 					    &bcm4377->hci_acl_event_ring);
1813 	if (ret)
1814 		return ret;
1815 	ret = bcm4377_alloc_completion_ring(bcm4377, &bcm4377->sco_ack_ring);
1816 	if (ret)
1817 		return ret;
1818 	ret = bcm4377_alloc_completion_ring(bcm4377, &bcm4377->sco_event_ring);
1819 	if (ret)
1820 		return ret;
1821 
1822 	dev_dbg(&bcm4377->pdev->dev, "all rings allocated and prepared\n");
1823 
1824 	return 0;
1825 }
1826 
bcm4377_boot(struct bcm4377_data * bcm4377)1827 static int bcm4377_boot(struct bcm4377_data *bcm4377)
1828 {
1829 	const struct firmware *fw;
1830 	void *bfr;
1831 	dma_addr_t fw_dma;
1832 	int ret = 0;
1833 	u32 bootstage, rti_status;
1834 
1835 	bootstage = ioread32(bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_BOOTSTAGE);
1836 	rti_status = ioread32(bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_RTI_STATUS);
1837 
1838 	if (bootstage != 0) {
1839 		dev_err(&bcm4377->pdev->dev, "bootstage is %d and not 0\n",
1840 			bootstage);
1841 		return -EINVAL;
1842 	}
1843 
1844 	if (rti_status != 0) {
1845 		dev_err(&bcm4377->pdev->dev, "RTI status is %d and not 0\n",
1846 			rti_status);
1847 		return -EINVAL;
1848 	}
1849 
1850 	fw = bcm4377_request_blob(bcm4377, "bin");
1851 	if (!fw) {
1852 		dev_err(&bcm4377->pdev->dev, "Failed to load firmware\n");
1853 		return -ENOENT;
1854 	}
1855 
1856 	bfr = dma_alloc_coherent(&bcm4377->pdev->dev, fw->size, &fw_dma,
1857 				 GFP_KERNEL);
1858 	if (!bfr) {
1859 		ret = -ENOMEM;
1860 		goto out_release_fw;
1861 	}
1862 
1863 	memcpy(bfr, fw->data, fw->size);
1864 
1865 	iowrite32(0, bcm4377->bar0 + BCM4377_BAR0_HOST_WINDOW_LO);
1866 	iowrite32(0, bcm4377->bar0 + BCM4377_BAR0_HOST_WINDOW_HI);
1867 	iowrite32(BCM4377_DMA_MASK,
1868 		  bcm4377->bar0 + BCM4377_BAR0_HOST_WINDOW_SIZE);
1869 
1870 	iowrite32(lower_32_bits(fw_dma),
1871 		  bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_FW_LO);
1872 	iowrite32(upper_32_bits(fw_dma),
1873 		  bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_FW_HI);
1874 	iowrite32(fw->size,
1875 		  bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_FW_SIZE);
1876 	iowrite32(0, bcm4377->bar0 + BCM4377_BAR0_FW_DOORBELL);
1877 
1878 	dev_dbg(&bcm4377->pdev->dev, "waiting for firmware to boot\n");
1879 
1880 	ret = wait_for_completion_interruptible_timeout(&bcm4377->event,
1881 							BCM4377_BOOT_TIMEOUT);
1882 	if (ret == 0) {
1883 		ret = -ETIMEDOUT;
1884 		goto out_dma_free;
1885 	} else if (ret < 0) {
1886 		goto out_dma_free;
1887 	}
1888 
1889 	if (bcm4377->bootstage != 2) {
1890 		dev_err(&bcm4377->pdev->dev, "boostage %d != 2\n",
1891 			bcm4377->bootstage);
1892 		ret = -ENXIO;
1893 		goto out_dma_free;
1894 	}
1895 
1896 	dev_dbg(&bcm4377->pdev->dev, "firmware has booted (stage = %x)\n",
1897 		bcm4377->bootstage);
1898 	ret = 0;
1899 
1900 out_dma_free:
1901 	dma_free_coherent(&bcm4377->pdev->dev, fw->size, bfr, fw_dma);
1902 out_release_fw:
1903 	release_firmware(fw);
1904 	return ret;
1905 }
1906 
/*
 * bcm4377_setup_rti - bring the firmware's RTI interface up to state 2
 *
 * RTI is first transitioned to state 1, then the DMA window is opened, the
 * shared "Converged IPC" context is handed to the firmware and RTI is moved
 * to state 2 in which the control ring becomes usable. The firmware acks
 * each transition with an interrupt that completes bcm4377->event and
 * updates bcm4377->rti_status.
 *
 * Returns 0 on success or a negative errno.
 */
static int bcm4377_setup_rti(struct bcm4377_data *bcm4377)
{
	int ret;

	dev_dbg(&bcm4377->pdev->dev, "starting RTI\n");
	iowrite32(1, bcm4377->bar0 + BCM4377_BAR0_RTI_CONTROL);

	ret = wait_for_completion_interruptible_timeout(&bcm4377->event,
							BCM4377_TIMEOUT);
	if (ret == 0) {
		dev_err(&bcm4377->pdev->dev,
			"timed out while waiting for RTI to transition to state 1");
		return -ETIMEDOUT;
	} else if (ret < 0) {
		/* interrupted by a signal */
		return ret;
	}

	if (bcm4377->rti_status != 1) {
		dev_err(&bcm4377->pdev->dev, "RTI did not ack state 1 (%d)\n",
			bcm4377->rti_status);
		return -ENODEV;
	}
	dev_dbg(&bcm4377->pdev->dev, "RTI is in state 1\n");

	/* allow access to the entire IOVA space again */
	iowrite32(0, bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_RTI_WINDOW_LO);
	iowrite32(0, bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_RTI_WINDOW_HI);
	iowrite32(BCM4377_DMA_MASK,
		  bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_RTI_WINDOW_SIZE);

	/* setup "Converged IPC" context */
	iowrite32(lower_32_bits(bcm4377->ctx_dma),
		  bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_CONTEXT_ADDR_LO);
	iowrite32(upper_32_bits(bcm4377->ctx_dma),
		  bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_CONTEXT_ADDR_HI);
	iowrite32(2, bcm4377->bar0 + BCM4377_BAR0_RTI_CONTROL);

	ret = wait_for_completion_interruptible_timeout(&bcm4377->event,
							BCM4377_TIMEOUT);
	if (ret == 0) {
		dev_err(&bcm4377->pdev->dev,
			"timed out while waiting for RTI to transition to state 2");
		return -ETIMEDOUT;
	} else if (ret < 0) {
		/* interrupted by a signal */
		return ret;
	}

	if (bcm4377->rti_status != 2) {
		dev_err(&bcm4377->pdev->dev, "RTI did not ack state 2 (%d)\n",
			bcm4377->rti_status);
		return -ENODEV;
	}

	dev_dbg(&bcm4377->pdev->dev,
		"RTI is in state 2; control ring is ready\n");
	bcm4377->control_ack_ring.enabled = true;

	return 0;
}
1966 
bcm4377_parse_otp_board_params(struct bcm4377_data * bcm4377,char tag,const char * val,size_t len)1967 static int bcm4377_parse_otp_board_params(struct bcm4377_data *bcm4377,
1968 					  char tag, const char *val, size_t len)
1969 {
1970 	if (tag != 'V')
1971 		return 0;
1972 	if (len >= sizeof(bcm4377->vendor))
1973 		return -EINVAL;
1974 
1975 	strscpy(bcm4377->vendor, val, len + 1);
1976 	return 0;
1977 }
1978 
/*
 * Handle a single chip-parameter tag parsed from OTP.
 * Only 's' (stepping) is consumed here; other tags are ignored. The value
 * is copied lowercased into bcm4377->stepping and NUL-terminated.
 */
static int bcm4377_parse_otp_chip_params(struct bcm4377_data *bcm4377, char tag,
					 const char *val, size_t len)
{
	size_t i;

	if (tag != 's')
		return 0;
	/* reserve room for the NUL terminator */
	if (len >= sizeof(bcm4377->stepping))
		return -EINVAL;

	for (i = 0; i < len; i++) {
		bcm4377->stepping[i] = tolower(val[i]);
		/* stop early if the source was already terminated */
		if (val[i] == '\0')
			return 0;
	}

	bcm4377->stepping[i] = '\0';
	return 0;
}
2001 
bcm4377_parse_otp_str(struct bcm4377_data * bcm4377,const u8 * str,enum bcm4377_otp_params_type type)2002 static int bcm4377_parse_otp_str(struct bcm4377_data *bcm4377, const u8 *str,
2003 				 enum bcm4377_otp_params_type type)
2004 {
2005 	const char *p;
2006 	int ret;
2007 
2008 	p = skip_spaces(str);
2009 	while (*p) {
2010 		char tag = *p++;
2011 		const char *end;
2012 		size_t len;
2013 
2014 		if (*p++ != '=') /* implicit NUL check */
2015 			return -EINVAL;
2016 
2017 		/* *p might be NUL here, if so end == p and len == 0 */
2018 		end = strchrnul(p, ' ');
2019 		len = end - p;
2020 
2021 		/* leave 1 byte for NUL in destination string */
2022 		if (len > (BCM4377_OTP_MAX_PARAM_LEN - 1))
2023 			return -EINVAL;
2024 
2025 		switch (type) {
2026 		case BCM4377_OTP_BOARD_PARAMS:
2027 			ret = bcm4377_parse_otp_board_params(bcm4377, tag, p,
2028 							     len);
2029 			break;
2030 		case BCM4377_OTP_CHIP_PARAMS:
2031 			ret = bcm4377_parse_otp_chip_params(bcm4377, tag, p,
2032 							    len);
2033 			break;
2034 		default:
2035 			ret = -EINVAL;
2036 			break;
2037 		}
2038 
2039 		if (ret)
2040 			return ret;
2041 
2042 		/* Skip to next arg, if any */
2043 		p = skip_spaces(end);
2044 	}
2045 
2046 	return 0;
2047 }
2048 
/*
 * Parse the SYS_VENDOR OTP region: a 4 byte header followed by two
 * consecutive NUL-terminated strings, the chip parameters and the board
 * parameters. On success bcm4377->stepping and bcm4377->vendor have been
 * filled in by the respective parsers; missing values are an error.
 *
 * Returns 0 on success or a negative errno on truncated/invalid data.
 */
static int bcm4377_parse_otp_sys_vendor(struct bcm4377_data *bcm4377, u8 *otp,
					size_t size)
{
	int idx = 4;
	const char *chip_params;
	const char *board_params;
	int ret;

	/* 4-byte header and two empty strings */
	if (size < 6)
		return -EINVAL;

	if (get_unaligned_le32(otp) != BCM4377_OTP_VENDOR_HDR)
		return -EINVAL;

	chip_params = &otp[idx];

	/* Skip first string, including terminator */
	idx += strnlen(chip_params, size - idx) + 1;
	if (idx >= size)
		return -EINVAL;

	board_params = &otp[idx];

	/* Skip to terminator of second string */
	idx += strnlen(board_params, size - idx);
	if (idx >= size)
		return -EINVAL;

	/* At this point both strings are guaranteed NUL-terminated */
	dev_dbg(&bcm4377->pdev->dev,
		"OTP: chip_params='%s' board_params='%s'\n", chip_params,
		board_params);

	ret = bcm4377_parse_otp_str(bcm4377, chip_params,
				    BCM4377_OTP_CHIP_PARAMS);
	if (ret)
		return ret;

	ret = bcm4377_parse_otp_str(bcm4377, board_params,
				    BCM4377_OTP_BOARD_PARAMS);
	if (ret)
		return ret;

	/* both parsers must have produced a non-empty value */
	if (!bcm4377->stepping[0] || !bcm4377->vendor[0])
		return -EINVAL;

	dev_dbg(&bcm4377->pdev->dev, "OTP: stepping=%s, vendor=%s\n",
		bcm4377->stepping, bcm4377->vendor);
	return 0;
}
2100 
bcm4377_parse_otp(struct bcm4377_data * bcm4377)2101 static int bcm4377_parse_otp(struct bcm4377_data *bcm4377)
2102 {
2103 	u8 *otp;
2104 	int i;
2105 	int ret = -ENOENT;
2106 
2107 	otp = kzalloc(BCM4377_OTP_SIZE, GFP_KERNEL);
2108 	if (!otp)
2109 		return -ENOMEM;
2110 
2111 	for (i = 0; i < BCM4377_OTP_SIZE; ++i)
2112 		otp[i] = ioread8(bcm4377->bar0 + bcm4377->hw->otp_offset + i);
2113 
2114 	i = 0;
2115 	while (i < (BCM4377_OTP_SIZE - 1)) {
2116 		u8 type = otp[i];
2117 		u8 length = otp[i + 1];
2118 
2119 		if (type == 0)
2120 			break;
2121 
2122 		if ((i + 2 + length) > BCM4377_OTP_SIZE)
2123 			break;
2124 
2125 		switch (type) {
2126 		case BCM4377_OTP_SYS_VENDOR:
2127 			dev_dbg(&bcm4377->pdev->dev,
2128 				"OTP @ 0x%x (%d): SYS_VENDOR", i, length);
2129 			ret = bcm4377_parse_otp_sys_vendor(bcm4377, &otp[i + 2],
2130 							   length);
2131 			break;
2132 		case BCM4377_OTP_CIS:
2133 			dev_dbg(&bcm4377->pdev->dev, "OTP @ 0x%x (%d): CIS", i,
2134 				length);
2135 			break;
2136 		default:
2137 			dev_dbg(&bcm4377->pdev->dev, "OTP @ 0x%x (%d): unknown",
2138 				i, length);
2139 			break;
2140 		}
2141 
2142 		i += 2 + length;
2143 	}
2144 
2145 	kfree(otp);
2146 	return ret;
2147 }
2148 
/*
 * bcm4377_init_cfg - program the vendor-specific PCI config space registers
 *
 * Sets up the BAR0/BAR2 address windows (per-variant values from
 * bcm4377->hw) and adjusts the subsystem control register. All registers
 * live in config space, so this must run before any BAR access.
 *
 * Returns 0 on success or the first failing pci_*_config_dword result.
 */
static int bcm4377_init_cfg(struct bcm4377_data *bcm4377)
{
	int ret;
	u32 ctrl;

	ret = pci_write_config_dword(bcm4377->pdev,
				     BCM4377_PCIECFG_BAR0_WINDOW1,
				     bcm4377->hw->bar0_window1);
	if (ret)
		return ret;

	ret = pci_write_config_dword(bcm4377->pdev,
				     BCM4377_PCIECFG_BAR0_WINDOW2,
				     bcm4377->hw->bar0_window2);
	if (ret)
		return ret;

	ret = pci_write_config_dword(
		bcm4377->pdev, BCM4377_PCIECFG_BAR0_CORE2_WINDOW1,
		BCM4377_PCIECFG_BAR0_CORE2_WINDOW1_DEFAULT);
	if (ret)
		return ret;

	/* only some variants have a second core2 window */
	if (bcm4377->hw->has_bar0_core2_window2) {
		ret = pci_write_config_dword(bcm4377->pdev,
					     BCM4377_PCIECFG_BAR0_CORE2_WINDOW2,
					     bcm4377->hw->bar0_core2_window2);
		if (ret)
			return ret;
	}

	ret = pci_write_config_dword(bcm4377->pdev, BCM4377_PCIECFG_BAR2_WINDOW,
				     BCM4377_PCIECFG_BAR2_WINDOW_DEFAULT);
	if (ret)
		return ret;

	/* read-modify-write the subsystem control register */
	ret = pci_read_config_dword(bcm4377->pdev,
				    BCM4377_PCIECFG_SUBSYSTEM_CTRL, &ctrl);
	if (ret)
		return ret;

	/*
	 * NOTE(review): the exact semantics of bits 16 and 19 are not
	 * documented here; the per-variant quirk flag decides whether
	 * bit 19 must be cleared.
	 */
	if (bcm4377->hw->clear_pciecfg_subsystem_ctrl_bit19)
		ctrl &= ~BIT(19);
	ctrl |= BIT(16);

	return pci_write_config_dword(bcm4377->pdev,
				      BCM4377_PCIECFG_SUBSYSTEM_CTRL, ctrl);
}
2197 
bcm4377_probe_dmi(struct bcm4377_data * bcm4377)2198 static int bcm4377_probe_dmi(struct bcm4377_data *bcm4377)
2199 {
2200 	const struct dmi_system_id *board_type_dmi_id;
2201 
2202 	board_type_dmi_id = dmi_first_match(bcm4377_dmi_board_table);
2203 	if (board_type_dmi_id && board_type_dmi_id->driver_data) {
2204 		bcm4377->board_type = board_type_dmi_id->driver_data;
2205 		dev_dbg(&bcm4377->pdev->dev,
2206 			"found board type via DMI match: %s\n",
2207 			bcm4377->board_type);
2208 	}
2209 
2210 	return 0;
2211 }
2212 
bcm4377_probe_of(struct bcm4377_data * bcm4377)2213 static int bcm4377_probe_of(struct bcm4377_data *bcm4377)
2214 {
2215 	struct device_node *np = bcm4377->pdev->dev.of_node;
2216 	int ret;
2217 
2218 	if (!np)
2219 		return 0;
2220 
2221 	ret = of_property_read_string(np, "brcm,board-type",
2222 				      &bcm4377->board_type);
2223 	if (ret) {
2224 		dev_err(&bcm4377->pdev->dev, "no brcm,board-type property\n");
2225 		return ret;
2226 	}
2227 
2228 	bcm4377->taurus_beamforming_cal_blob =
2229 		of_get_property(np, "brcm,taurus-bf-cal-blob",
2230 				&bcm4377->taurus_beamforming_cal_size);
2231 	if (!bcm4377->taurus_beamforming_cal_blob) {
2232 		dev_err(&bcm4377->pdev->dev,
2233 			"no brcm,taurus-bf-cal-blob property\n");
2234 		return -ENOENT;
2235 	}
2236 	bcm4377->taurus_cal_blob = of_get_property(np, "brcm,taurus-cal-blob",
2237 						   &bcm4377->taurus_cal_size);
2238 	if (!bcm4377->taurus_cal_blob) {
2239 		dev_err(&bcm4377->pdev->dev,
2240 			"no brcm,taurus-cal-blob property\n");
2241 		return -ENOENT;
2242 	}
2243 
2244 	return 0;
2245 }
2246 
/*
 * bcm4377_disable_aspm - force ASPM off for variants with the errata flag
 *
 * First asks the PCI core politely, then clears the ASPM control bits in
 * the link control register directly as a fallback.
 */
static void bcm4377_disable_aspm(struct bcm4377_data *bcm4377)
{
	pci_disable_link_state(bcm4377->pdev,
			       PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);

	/*
	 * pci_disable_link_state can fail if either CONFIG_PCIEASPM is disabled
	 * or if the BIOS hasn't handed over control to us. We must *always*
	 * disable ASPM for this device due to hardware errata though.
	 */
	pcie_capability_clear_word(bcm4377->pdev, PCI_EXP_LNKCTL,
				   PCI_EXP_LNKCTL_ASPMC);
}
2260 
/* devres action: release the MSI vectors allocated during probe */
static void bcm4377_pci_free_irq_vectors(void *data)
{
	pci_free_irq_vectors(data);
}
2265 
/* devres action: drop the hci_dev allocated during probe */
static void bcm4377_hci_free_dev(void *data)
{
	hci_free_dev(data);
}
2270 
/* devres action: unregister the hci_dev registered at the end of probe */
static void bcm4377_hci_unregister_dev(void *data)
{
	hci_unregister_dev(data);
}
2275 
/*
 * bcm4377_probe - PCI probe: DMA setup, rings, config space, IRQ, HCI
 *
 * Everything is either devres-managed or registered via
 * devm_add_action_or_reset, so all error paths can simply return without
 * explicit unwinding.
 */
static int bcm4377_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct bcm4377_data *bcm4377;
	struct hci_dev *hdev;
	int ret, irq;

	/* restrict DMA to the chip's 32 bit window, see BCM4377_DMA_MASK */
	ret = dma_set_mask_and_coherent(&pdev->dev, BCM4377_DMA_MASK);
	if (ret)
		return ret;

	bcm4377 = devm_kzalloc(&pdev->dev, sizeof(*bcm4377), GFP_KERNEL);
	if (!bcm4377)
		return -ENOMEM;

	bcm4377->pdev = pdev;
	/* driver_data indexes into bcm4377_hw_variants */
	bcm4377->hw = &bcm4377_hw_variants[id->driver_data];
	init_completion(&bcm4377->event);

	ret = bcm4377_prepare_rings(bcm4377);
	if (ret)
		return ret;

	ret = bcm4377_init_context(bcm4377);
	if (ret)
		return ret;

	/* board type may come from DMI (x86 Macs) or devicetree */
	ret = bcm4377_probe_dmi(bcm4377);
	if (ret)
		return ret;
	ret = bcm4377_probe_of(bcm4377);
	if (ret)
		return ret;
	if (!bcm4377->board_type) {
		dev_err(&pdev->dev, "unable to determine board type\n");
		return -ENODEV;
	}

	if (bcm4377->hw->disable_aspm)
		bcm4377_disable_aspm(bcm4377);

	/* best effort: a failed FLR is reported but not fatal */
	ret = pci_reset_function_locked(pdev);
	if (ret)
		dev_warn(
			&pdev->dev,
			"function level reset failed with %d; trying to continue anyway\n",
			ret);

	/*
	 * If this number is too low and we try to access any BAR too
	 * early the device will crash. Experiments have shown that
	 * approximately 50 msec is the minimum amount we have to wait.
	 * Let's double that to be safe.
	 */
	msleep(100);

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;
	pci_set_master(pdev);

	/* program the vendor config-space windows before touching BARs */
	ret = bcm4377_init_cfg(bcm4377);
	if (ret)
		return ret;

	bcm4377->bar0 = pcim_iomap(pdev, 0, 0);
	if (!bcm4377->bar0)
		return -EBUSY;
	bcm4377->bar2 = pcim_iomap(pdev, 2, 0);
	if (!bcm4377->bar2)
		return -EBUSY;

	/* determine vendor/stepping, used to pick the firmware blob */
	ret = bcm4377_parse_otp(bcm4377);
	if (ret) {
		dev_err(&pdev->dev, "Reading OTP failed with %d\n", ret);
		return ret;
	}

	/*
	 * Legacy interrupts result in an IRQ storm because we don't know where
	 * the interrupt mask and status registers for these chips are.
	 * MSIs are acked automatically instead.
	 */
	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
	if (ret < 0)
		return -ENODEV;
	ret = devm_add_action_or_reset(&pdev->dev, bcm4377_pci_free_irq_vectors,
				       pdev);
	if (ret)
		return ret;

	irq = pci_irq_vector(pdev, 0);
	if (irq <= 0)
		return -ENODEV;

	ret = devm_request_irq(&pdev->dev, irq, bcm4377_irq, 0, "bcm4377",
			       bcm4377);
	if (ret)
		return ret;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;
	ret = devm_add_action_or_reset(&pdev->dev, bcm4377_hci_free_dev, hdev);
	if (ret)
		return ret;

	bcm4377->hdev = hdev;

	hdev->bus = HCI_PCI;
	hdev->open = bcm4377_hci_open;
	hdev->close = bcm4377_hci_close;
	hdev->send = bcm4377_hci_send_frame;
	hdev->set_bdaddr = bcm4377_hci_set_bdaddr;
	hdev->setup = bcm4377_hci_setup;

	/* per-variant firmware bug workarounds */
	if (bcm4377->hw->broken_mws_transport_config)
		set_bit(HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG, &hdev->quirks);
	if (bcm4377->hw->broken_ext_scan)
		set_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &hdev->quirks);
	if (bcm4377->hw->broken_le_coded)
		set_bit(HCI_QUIRK_BROKEN_LE_CODED, &hdev->quirks);
	if (bcm4377->hw->broken_le_ext_adv_report_phy)
		set_bit(HCI_QUIRK_FIXUP_LE_EXT_ADV_REPORT_PHY, &hdev->quirks);

	pci_set_drvdata(pdev, bcm4377);
	hci_set_drvdata(hdev, bcm4377);
	SET_HCIDEV_DEV(hdev, &pdev->dev);

	/* upload firmware and bring up the IPC transport */
	ret = bcm4377_boot(bcm4377);
	if (ret)
		return ret;

	ret = bcm4377_setup_rti(bcm4377);
	if (ret)
		return ret;

	ret = hci_register_dev(hdev);
	if (ret)
		return ret;
	return devm_add_action_or_reset(&pdev->dev, bcm4377_hci_unregister_dev,
					hdev);
}
2418 
/*
 * Legacy PCI suspend hook: quiesce the HCI core first, then tell the
 * firmware to enter its low-power state via the sleep control register.
 */
static int bcm4377_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct bcm4377_data *bcm4377 = pci_get_drvdata(pdev);
	int ret;

	ret = hci_suspend_dev(bcm4377->hdev);
	if (ret)
		return ret;

	iowrite32(BCM4377_BAR0_SLEEP_CONTROL_QUIESCE,
		  bcm4377->bar0 + BCM4377_BAR0_SLEEP_CONTROL);

	return 0;
}
2433 
/*
 * Legacy PCI resume hook: wake the firmware via the sleep control register
 * before handing the device back to the HCI core.
 */
static int bcm4377_resume(struct pci_dev *pdev)
{
	struct bcm4377_data *bcm4377 = pci_get_drvdata(pdev);

	iowrite32(BCM4377_BAR0_SLEEP_CONTROL_UNQUIESCE,
		  bcm4377->bar0 + BCM4377_BAR0_SLEEP_CONTROL);

	return hci_resume_dev(bcm4377->hdev);
}
2443 
/*
 * DMI matches used by bcm4377_probe_dmi() to determine the board type on
 * machines without a devicetree; driver_data holds the board-type string.
 */
static const struct dmi_system_id bcm4377_dmi_board_table[] = {
	{
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir9,1"),
		},
		.driver_data = "apple,formosa",
	},
	{
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro15,4"),
		},
		.driver_data = "apple,formosa",
	},
	{
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro16,3"),
		},
		.driver_data = "apple,formosa",
	},
	{}
};
2468 
/*
 * Per-chip configuration: OTP location, config-space window addresses,
 * quirk flags consumed in bcm4377_probe()/bcm4377_init_cfg(), and the
 * calibration/PTB upload callbacks. Indexed by enum bcm4377_chip via
 * the PCI id table's driver_data.
 */
static const struct bcm4377_hw bcm4377_hw_variants[] = {
	[BCM4377] = {
		.id = 0x4377,
		.otp_offset = 0x4120,
		.bar0_window1 = 0x1800b000,
		.bar0_window2 = 0x1810c000,
		.disable_aspm = true,
		.broken_ext_scan = true,
		.send_ptb = bcm4377_send_ptb,
	},

	[BCM4378] = {
		.id = 0x4378,
		.otp_offset = 0x4120,
		.bar0_window1 = 0x18002000,
		.bar0_window2 = 0x1810a000,
		.bar0_core2_window2 = 0x18107000,
		.has_bar0_core2_window2 = true,
		.broken_mws_transport_config = true,
		.broken_le_coded = true,
		.send_calibration = bcm4378_send_calibration,
		.send_ptb = bcm4378_send_ptb,
	},

	[BCM4387] = {
		.id = 0x4387,
		.otp_offset = 0x413c,
		.bar0_window1 = 0x18002000,
		.bar0_window2 = 0x18109000,
		.bar0_core2_window2 = 0x18106000,
		.has_bar0_core2_window2 = true,
		.clear_pciecfg_subsystem_ctrl_bit19 = true,
		.broken_mws_transport_config = true,
		.broken_le_coded = true,
		.broken_le_ext_adv_report_phy = true,
		.send_calibration = bcm4387_send_calibration,
		.send_ptb = bcm4378_send_ptb,
	},

	[BCM4388] = {
		.id = 0x4388,
		.otp_offset = 0x415c,
		/* BCM4388 maps its BAR2 registers at an additional offset */
		.bar2_offset = 0x200000,
		.bar0_window1 = 0x18002000,
		.bar0_window2 = 0x18109000,
		.bar0_core2_window2 = 0x18106000,
		.has_bar0_core2_window2 = true,
		.broken_mws_transport_config = true,
		.broken_le_coded = true,
		.broken_le_ext_adv_report_phy = true,
		.send_calibration = bcm4388_send_calibration,
		.send_ptb = bcm4378_send_ptb,
	},
};
2523 
/*
 * Build one PCI match entry per chip: Broadcom vendor id, the chip's device
 * id, a class match on "network/other", and the enum bcm4377_chip value as
 * driver_data (used to index bcm4377_hw_variants in probe).
 */
#define BCM4377_DEVID_ENTRY(id)                                             \
	{                                                                   \
		PCI_VENDOR_ID_BROADCOM, BCM##id##_DEVICE_ID, PCI_ANY_ID,    \
			PCI_ANY_ID, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, \
			BCM##id                                             \
	}

static const struct pci_device_id bcm4377_devid_table[] = {
	BCM4377_DEVID_ENTRY(4377),
	BCM4377_DEVID_ENTRY(4378),
	BCM4377_DEVID_ENTRY(4387),
	BCM4377_DEVID_ENTRY(4388),
	{},
};
MODULE_DEVICE_TABLE(pci, bcm4377_devid_table);
2539 
/* uses the legacy .suspend/.resume PCI PM hooks rather than dev_pm_ops */
static struct pci_driver bcm4377_pci_driver = {
	.name = "hci_bcm4377",
	.id_table = bcm4377_devid_table,
	.probe = bcm4377_probe,
	.suspend = bcm4377_suspend,
	.resume = bcm4377_resume,
};
module_pci_driver(bcm4377_pci_driver);

MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
MODULE_DESCRIPTION("Bluetooth support for Broadcom 4377/4378/4387/4388 devices");
MODULE_LICENSE("Dual MIT/GPL");
/* firmware/PTB blob name patterns; the exact name is built from OTP data */
MODULE_FIRMWARE("brcm/brcmbt4377*.bin");
MODULE_FIRMWARE("brcm/brcmbt4377*.ptb");
MODULE_FIRMWARE("brcm/brcmbt4378*.bin");
MODULE_FIRMWARE("brcm/brcmbt4378*.ptb");
MODULE_FIRMWARE("brcm/brcmbt4387*.bin");
MODULE_FIRMWARE("brcm/brcmbt4387*.ptb");
MODULE_FIRMWARE("brcm/brcmbt4388*.bin");
MODULE_FIRMWARE("brcm/brcmbt4388*.ptb");
2560