1  // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
2  /* QLogic qed NIC Driver
3   * Copyright (c) 2015-2017  QLogic Corporation
4   * Copyright (c) 2019-2020 Marvell International Ltd.
5   */
6  
7  #include <linux/types.h>
8  #include <asm/byteorder.h>
9  #include <linux/delay.h>
10  #include <linux/errno.h>
11  #include <linux/kernel.h>
12  #include <linux/slab.h>
13  #include <linux/spinlock.h>
14  #include <linux/string.h>
15  #include <linux/etherdevice.h>
16  #include "qed.h"
17  #include "qed_cxt.h"
18  #include "qed_dcbx.h"
19  #include "qed_hsi.h"
20  #include "qed_mfw_hsi.h"
21  #include "qed_hw.h"
22  #include "qed_mcp.h"
23  #include "qed_reg_addr.h"
24  #include "qed_sriov.h"
25  
26  #define GRCBASE_MCP     0xe00000
27  
28  #define QED_MCP_RESP_ITER_US	10
29  
30  #define QED_DRV_MB_MAX_RETRIES	(500 * 1000)	/* Account for 5 sec */
31  #define QED_MCP_RESET_RETRIES	(50 * 1000)	/* Account for 500 msec */
32  
33  #define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val)	     \
34  	qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + (_offset)), \
35  	       _val)
36  
37  #define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
38  	qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + (_offset)))
39  
40  #define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val)  \
41  	DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr, \
42  		     offsetof(struct public_drv_mb, _field), _val)
43  
44  #define DRV_MB_RD(_p_hwfn, _p_ptt, _field)	   \
45  	DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
46  		     offsetof(struct public_drv_mb, _field))
47  
48  #define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
49  		  DRV_ID_PDA_COMP_VER_SHIFT)
50  
51  #define MCP_BYTES_PER_MBIT_SHIFT 17
52  
53  bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
54  {
55  	if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
56  		return false;
57  	return true;
58  }
59  
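/* Cache the address of this PF's PUBLIC_PORT section in SHMEM, so that later
 * port-level reads (link status, transceiver data, EEE status) can use it.
 */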
60  void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
61  {
62  	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
63  					PUBLIC_PORT);
64  	u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr);
65  
66  	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
67  						   MFW_PORT(p_hwfn));
68  	DP_VERBOSE(p_hwfn, QED_MSG_SP,
69  		   "port_addr = 0x%x, port_id 0x%02x\n",
70  		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
71  }
72  
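/* Copy the current MFW -> driver mailbox messages from SHMEM into mfw_mb_cur.
 * The data is stored big-endian in SHMEM and is byte-swapped to CPU order.
 */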
73  void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
74  {
75  	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
76  	u32 tmp, i;
77  
78  	if (!p_hwfn->mcp_info->public_base)
79  		return;
80  
81  	for (i = 0; i < length; i++) {
82  		tmp = qed_rd(p_hwfn, p_ptt,
83  			     p_hwfn->mcp_info->mfw_mb_addr +
84  			     (i << 2) + sizeof(u32));
85  
86  		/* The MB data is actually BE; Need to force it to cpu */
87  		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
88  			be32_to_cpu((__force __be32)tmp);
89  	}
90  }
91  
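/* A list element describing a single in-flight mailbox command; it is used to
 * match an MFW response (by sequence number) with the request that issued it.
 */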
92  struct qed_mcp_cmd_elem {
93  	struct list_head list;
94  	struct qed_mcp_mb_params *p_mb_params;
95  	u16 expected_seq_num;
96  	bool b_is_completed;
97  };
98  
99  /* Must be called while cmd_lock is acquired */
100  static struct qed_mcp_cmd_elem *
101  qed_mcp_cmd_add_elem(struct qed_hwfn *p_hwfn,
102  		     struct qed_mcp_mb_params *p_mb_params,
103  		     u16 expected_seq_num)
104  {
105  	struct qed_mcp_cmd_elem *p_cmd_elem = NULL;
106  
107  	p_cmd_elem = kzalloc(sizeof(*p_cmd_elem), GFP_ATOMIC);
108  	if (!p_cmd_elem)
109  		goto out;
110  
111  	p_cmd_elem->p_mb_params = p_mb_params;
112  	p_cmd_elem->expected_seq_num = expected_seq_num;
113  	list_add(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
114  out:
115  	return p_cmd_elem;
116  }
117  
118  /* Must be called while cmd_lock is acquired */
119  static void qed_mcp_cmd_del_elem(struct qed_hwfn *p_hwfn,
120  				 struct qed_mcp_cmd_elem *p_cmd_elem)
121  {
122  	list_del(&p_cmd_elem->list);
123  	kfree(p_cmd_elem);
124  }
125  
126  /* Must be called while cmd_lock is acquired */
127  static struct qed_mcp_cmd_elem *qed_mcp_cmd_get_elem(struct qed_hwfn *p_hwfn,
128  						     u16 seq_num)
129  {
130  	struct qed_mcp_cmd_elem *p_cmd_elem = NULL;
131  
132  	list_for_each_entry(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list) {
133  		if (p_cmd_elem->expected_seq_num == seq_num)
134  			return p_cmd_elem;
135  	}
136  
137  	return NULL;
138  }
139  
140  int qed_mcp_free(struct qed_hwfn *p_hwfn)
141  {
142  	if (p_hwfn->mcp_info) {
143  		struct qed_mcp_cmd_elem *p_cmd_elem = NULL, *p_tmp;
144  
145  		kfree(p_hwfn->mcp_info->mfw_mb_cur);
146  		kfree(p_hwfn->mcp_info->mfw_mb_shadow);
147  
148  		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
149  		list_for_each_entry_safe(p_cmd_elem,
150  					 p_tmp,
151  					 &p_hwfn->mcp_info->cmd_list, list) {
152  			qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
153  		}
154  		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
155  	}
156  
157  	kfree(p_hwfn->mcp_info);
158  	p_hwfn->mcp_info = NULL;
159  
160  	return 0;
161  }
162  
163  /* Maximum of 1 sec to wait for the SHMEM ready indication */
164  #define QED_MCP_SHMEM_RDY_MAX_RETRIES	20
165  #define QED_MCP_SHMEM_RDY_ITER_MS	50
166  
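/* Read the MCP public (shared memory) base address and derive the per-PF
 * driver and MFW mailbox addresses, the initial mailbox/pulse sequence
 * numbers and the MCP history counter.
 */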
167  static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
168  {
169  	struct qed_mcp_info *p_info = p_hwfn->mcp_info;
170  	u8 cnt = QED_MCP_SHMEM_RDY_MAX_RETRIES;
171  	u8 msec = QED_MCP_SHMEM_RDY_ITER_MS;
172  	u32 drv_mb_offsize, mfw_mb_offsize;
173  	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
174  
175  	p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
176  	if (!p_info->public_base) {
177  		DP_NOTICE(p_hwfn,
178  			  "The address of the MCP scratch-pad is not configured\n");
179  		return -EINVAL;
180  	}
181  
182  	p_info->public_base |= GRCBASE_MCP;
183  
184  	/* Get the MFW MB address and number of supported messages */
185  	mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
186  				SECTION_OFFSIZE_ADDR(p_info->public_base,
187  						     PUBLIC_MFW_MB));
188  	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
189  	p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt,
190  					    p_info->mfw_mb_addr +
191  					    offsetof(struct public_mfw_mb,
192  						     sup_msgs));
193  
194  	/* The driver can notify that there was an MCP reset, and might read the
195  	 * SHMEM values before the MFW has completed initializing them.
196  	 * To avoid this, the "sup_msgs" field in the MFW mailbox is used as a
197  	 * data ready indication.
198  	 */
199  	while (!p_info->mfw_mb_length && --cnt) {
200  		msleep(msec);
201  		p_info->mfw_mb_length =
202  			(u16)qed_rd(p_hwfn, p_ptt,
203  				    p_info->mfw_mb_addr +
204  				    offsetof(struct public_mfw_mb, sup_msgs));
205  	}
206  
207  	if (!cnt) {
208  		DP_NOTICE(p_hwfn,
209  			  "Failed to get the SHMEM ready notification after %d msec\n",
210  			  QED_MCP_SHMEM_RDY_MAX_RETRIES * msec);
211  		return -EBUSY;
212  	}
213  
214  	/* Calculate the driver and MFW mailbox address */
215  	drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
216  				SECTION_OFFSIZE_ADDR(p_info->public_base,
217  						     PUBLIC_DRV_MB));
218  	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
219  	DP_VERBOSE(p_hwfn, QED_MSG_SP,
220  		   "drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x, mcp_pf_id = 0x%x\n",
221  		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
222  
223  	/* Get the current driver mailbox sequence before sending
224  	 * the first command
225  	 */
226  	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
227  			     DRV_MSG_SEQ_NUMBER_MASK;
228  
229  	/* Get current FW pulse sequence */
230  	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
231  				DRV_PULSE_SEQ_MASK;
232  
233  	p_info->mcp_hist = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
234  
235  	return 0;
236  }
237  
238  int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
239  {
240  	struct qed_mcp_info *p_info;
241  	u32 size;
242  
243  	/* Allocate mcp_info structure */
244  	p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_KERNEL);
245  	if (!p_hwfn->mcp_info)
246  		goto err;
247  	p_info = p_hwfn->mcp_info;
248  
249  	/* Initialize the MFW spinlock */
250  	spin_lock_init(&p_info->cmd_lock);
251  	spin_lock_init(&p_info->link_lock);
252  	spin_lock_init(&p_info->unload_lock);
253  
254  	INIT_LIST_HEAD(&p_info->cmd_list);
255  
256  	if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
257  		DP_NOTICE(p_hwfn, "MCP is not initialized\n");
258  		/* Do not free mcp_info here, since public_base indicates that
259  		 * the MCP is not initialized
260  		 */
261  		return 0;
262  	}
263  
264  	size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
265  	p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
266  	p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
267  	if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
268  		goto err;
269  
270  	return 0;
271  
272  err:
273  	qed_mcp_free(p_hwfn);
274  	return -ENOMEM;
275  }
276  
277  static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn,
278  				   struct qed_ptt *p_ptt)
279  {
280  	u32 generic_por_0 = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
281  
282  	/* Use MCP history register to check if MCP reset occurred between init
283  	 * time and now.
284  	 */
285  	if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
286  		DP_VERBOSE(p_hwfn,
287  			   QED_MSG_SP,
288  			   "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
289  			   p_hwfn->mcp_info->mcp_hist, generic_por_0);
290  
291  		qed_load_mcp_offsets(p_hwfn, p_ptt);
292  		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
293  	}
294  }
295  
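/* Request an MCP reset via the mailbox. Completion is detected by a change in
 * the MCP history register (MISCS_REG_GENERIC_POR_0).
 */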
296  int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
297  {
298  	u32 org_mcp_reset_seq, seq, delay = QED_MCP_RESP_ITER_US, cnt = 0;
299  	int rc = 0;
300  
301  	if (p_hwfn->mcp_info->b_block_cmd) {
302  		DP_NOTICE(p_hwfn,
303  			  "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
304  		return -EBUSY;
305  	}
306  
307  	/* Ensure that only a single thread is accessing the mailbox */
308  	spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
309  
310  	org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
311  
312  	/* Set drv command along with the updated sequence */
313  	qed_mcp_reread_offsets(p_hwfn, p_ptt);
314  	seq = ++p_hwfn->mcp_info->drv_mb_seq;
315  	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));
316  
317  	do {
318  		/* Wait for MFW response */
319  		udelay(delay);
320  		/* Give the FW up to 500 msec (50*1000*10usec) */
321  	} while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt,
322  					      MISCS_REG_GENERIC_POR_0)) &&
323  		 (cnt++ < QED_MCP_RESET_RETRIES));
324  
325  	if (org_mcp_reset_seq !=
326  	    qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
327  		DP_VERBOSE(p_hwfn, QED_MSG_SP,
328  			   "MCP was reset after %d usec\n", cnt * delay);
329  	} else {
330  		DP_ERR(p_hwfn, "Failed to reset MCP\n");
331  		rc = -EAGAIN;
332  	}
333  
334  	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
335  
336  	return rc;
337  }
338  
339  /* Must be called while cmd_lock is acquired */
340  static bool qed_mcp_has_pending_cmd(struct qed_hwfn *p_hwfn)
341  {
342  	struct qed_mcp_cmd_elem *p_cmd_elem;
343  
344  	/* There is at most one pending command at any given time, and if it
345  	 * exists - it is placed at the HEAD of the list.
346  	 */
347  	if (!list_empty(&p_hwfn->mcp_info->cmd_list)) {
348  		p_cmd_elem = list_first_entry(&p_hwfn->mcp_info->cmd_list,
349  					      struct qed_mcp_cmd_elem, list);
350  		return !p_cmd_elem->b_is_completed;
351  	}
352  
353  	return false;
354  }
355  
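/* Check fw_mb_header for a new response, match it to the pending command by
 * sequence number, and copy the response, param and union data back into the
 * command's mailbox parameters.
 */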
356  /* Must be called while cmd_lock is acquired */
357  static int
358  qed_mcp_update_pending_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
359  {
360  	struct qed_mcp_mb_params *p_mb_params;
361  	struct qed_mcp_cmd_elem *p_cmd_elem;
362  	u32 mcp_resp;
363  	u16 seq_num;
364  
365  	mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
366  	seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);
367  
368  	/* Return if no new non-handled response has been received */
369  	if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
370  		return -EAGAIN;
371  
372  	p_cmd_elem = qed_mcp_cmd_get_elem(p_hwfn, seq_num);
373  	if (!p_cmd_elem) {
374  		DP_ERR(p_hwfn,
375  		       "Failed to find a pending mailbox cmd that expects sequence number %d\n",
376  		       seq_num);
377  		return -EINVAL;
378  	}
379  
380  	p_mb_params = p_cmd_elem->p_mb_params;
381  
382  	/* Get the MFW response along with the sequence number */
383  	p_mb_params->mcp_resp = mcp_resp;
384  
385  	/* Get the MFW param */
386  	p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
387  
388  	/* Get the union data */
389  	if (p_mb_params->p_data_dst && p_mb_params->data_dst_size) {
390  		u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
391  				      offsetof(struct public_drv_mb,
392  					       union_data);
393  		qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
394  				union_data_addr, p_mb_params->data_dst_size);
395  	}
396  
397  	p_cmd_elem->b_is_completed = true;
398  
399  	return 0;
400  }
401  
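/* Write the union data, the drv_mb_param and finally the drv_mb_header
 * (command | sequence number) to the driver mailbox in SHMEM.
 */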
402  /* Must be called while cmd_lock is acquired */
403  static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
404  				    struct qed_ptt *p_ptt,
405  				    struct qed_mcp_mb_params *p_mb_params,
406  				    u16 seq_num)
407  {
408  	union drv_union_data union_data;
409  	u32 union_data_addr;
410  
411  	/* Set the union data */
412  	union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
413  			  offsetof(struct public_drv_mb, union_data);
414  	memset(&union_data, 0, sizeof(union_data));
415  	if (p_mb_params->p_data_src && p_mb_params->data_src_size)
416  		memcpy(&union_data, p_mb_params->p_data_src,
417  		       p_mb_params->data_src_size);
418  	qed_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
419  		      sizeof(union_data));
420  
421  	/* Set the drv param */
422  	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);
423  
424  	/* Set the drv command along with the sequence number */
425  	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));
426  
427  	DP_VERBOSE(p_hwfn, QED_MSG_SP,
428  		   "MFW mailbox: command 0x%08x param 0x%08x\n",
429  		   (p_mb_params->cmd | seq_num), p_mb_params->param);
430  }
431  
432  static void qed_mcp_cmd_set_blocking(struct qed_hwfn *p_hwfn, bool block_cmd)
433  {
434  	p_hwfn->mcp_info->b_block_cmd = block_cmd;
435  
436  	DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
437  		block_cmd ? "Block" : "Unblock");
438  }
439  
440  static void qed_mcp_print_cpu_info(struct qed_hwfn *p_hwfn,
441  				   struct qed_ptt *p_ptt)
442  {
443  	u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;
444  	u32 delay = QED_MCP_RESP_ITER_US;
445  
446  	cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
447  	cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
448  	cpu_pc_0 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
449  	udelay(delay);
450  	cpu_pc_1 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
451  	udelay(delay);
452  	cpu_pc_2 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
453  
454  	DP_NOTICE(p_hwfn,
455  		  "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
456  		  cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
457  }
458  
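/* Send a mailbox command and wait for the MFW response:
 * 1. Poll until no previously sent command is still pending.
 * 2. Write the command under cmd_lock and add it to the pending-command list.
 * 3. Poll (sleeping or busy-waiting, depending on the CAN_SLEEP flag) until
 *    the response with the matching sequence number arrives, or give up after
 *    max_retries, optionally blocking further mailbox usage.
 */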
459  static int
460  _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
461  		       struct qed_ptt *p_ptt,
462  		       struct qed_mcp_mb_params *p_mb_params,
463  		       u32 max_retries, u32 usecs)
464  {
465  	u32 cnt = 0, msecs = DIV_ROUND_UP(usecs, 1000);
466  	struct qed_mcp_cmd_elem *p_cmd_elem;
467  	u16 seq_num;
468  	int rc = 0;
469  
470  	/* Wait until the mailbox is non-occupied */
471  	do {
472  		/* Exit the loop if there is no pending command, or if the
473  		 * pending command is completed during this iteration.
474  		 * The spinlock stays locked until the command is sent.
475  		 */
476  
477  		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
478  
479  		if (!qed_mcp_has_pending_cmd(p_hwfn))
480  			break;
481  
482  		rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
483  		if (!rc)
484  			break;
485  		else if (rc != -EAGAIN)
486  			goto err;
487  
488  		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
489  
490  		if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
491  			msleep(msecs);
492  		else
493  			udelay(usecs);
494  	} while (++cnt < max_retries);
495  
496  	if (cnt >= max_retries) {
497  		DP_NOTICE(p_hwfn,
498  			  "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
499  			  p_mb_params->cmd, p_mb_params->param);
500  		return -EAGAIN;
501  	}
502  
503  	/* Send the mailbox command */
504  	qed_mcp_reread_offsets(p_hwfn, p_ptt);
505  	seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
506  	p_cmd_elem = qed_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
507  	if (!p_cmd_elem) {
508  		rc = -ENOMEM;
509  		goto err;
510  	}
511  
512  	__qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
513  	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
514  
515  	/* Wait for the MFW response */
516  	do {
517  		/* Exit the loop if the command is already completed, or if the
518  		 * command is completed during this iteration.
519  		 * The spinlock stays locked until the list element is removed.
520  		 */
521  
522  		if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
523  			msleep(msecs);
524  		else
525  			udelay(usecs);
526  
527  		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
528  
529  		if (p_cmd_elem->b_is_completed)
530  			break;
531  
532  		rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
533  		if (!rc)
534  			break;
535  		else if (rc != -EAGAIN)
536  			goto err;
537  
538  		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
539  	} while (++cnt < max_retries);
540  
541  	if (cnt >= max_retries) {
542  		DP_NOTICE(p_hwfn,
543  			  "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
544  			  p_mb_params->cmd, p_mb_params->param);
545  		qed_mcp_print_cpu_info(p_hwfn, p_ptt);
546  
547  		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
548  		qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
549  		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
550  
551  		if (!QED_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK))
552  			qed_mcp_cmd_set_blocking(p_hwfn, true);
553  
554  		qed_hw_err_notify(p_hwfn, p_ptt,
555  				  QED_HW_ERR_MFW_RESP_FAIL, NULL);
556  		return -EAGAIN;
557  	}
558  
559  	qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
560  	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
561  
562  	DP_VERBOSE(p_hwfn,
563  		   QED_MSG_SP,
564  		   "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
565  		   p_mb_params->mcp_resp,
566  		   p_mb_params->mcp_param,
567  		   (cnt * usecs) / 1000, (cnt * usecs) % 1000);
568  
569  	/* Clear the sequence number from the MFW response */
570  	p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;
571  
572  	return 0;
573  
574  err:
575  	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
576  	return rc;
577  }
578  
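/* Validate the request (MCP initialized, MFW responsive, source/destination
 * sizes within the union data size) and switch to a sleeping polling scheme
 * when the caller allows it, before issuing the command.
 */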
579  static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
580  				 struct qed_ptt *p_ptt,
581  				 struct qed_mcp_mb_params *p_mb_params)
582  {
583  	size_t union_data_size = sizeof(union drv_union_data);
584  	u32 max_retries = QED_DRV_MB_MAX_RETRIES;
585  	u32 usecs = QED_MCP_RESP_ITER_US;
586  
587  	/* MCP not initialized */
588  	if (!qed_mcp_is_init(p_hwfn)) {
589  		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
590  		return -EBUSY;
591  	}
592  
593  	if (p_hwfn->mcp_info->b_block_cmd) {
594  		DP_NOTICE(p_hwfn,
595  			  "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
596  			  p_mb_params->cmd, p_mb_params->param);
597  		return -EBUSY;
598  	}
599  
600  	if (p_mb_params->data_src_size > union_data_size ||
601  	    p_mb_params->data_dst_size > union_data_size) {
602  		DP_ERR(p_hwfn,
603  		       "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
604  		       p_mb_params->data_src_size,
605  		       p_mb_params->data_dst_size, union_data_size);
606  		return -EINVAL;
607  	}
608  
609  	if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) {
610  		max_retries = DIV_ROUND_UP(max_retries, 1000);
611  		usecs *= 1000;
612  	}
613  
614  	return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
615  				      usecs);
616  }
617  
618  static int _qed_mcp_cmd(struct qed_hwfn *p_hwfn,
619  			struct qed_ptt *p_ptt,
620  			u32 cmd,
621  			u32 param,
622  			u32 *o_mcp_resp,
623  			u32 *o_mcp_param,
624  			bool can_sleep)
625  {
626  	struct qed_mcp_mb_params mb_params;
627  	int rc;
628  
629  	memset(&mb_params, 0, sizeof(mb_params));
630  	mb_params.cmd = cmd;
631  	mb_params.param = param;
632  	mb_params.flags = can_sleep ? QED_MB_FLAG_CAN_SLEEP : 0;
633  
634  	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
635  	if (rc)
636  		return rc;
637  
638  	*o_mcp_resp = mb_params.mcp_resp;
639  	*o_mcp_param = mb_params.mcp_param;
640  
641  	return 0;
642  }
643  
644  int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
645  		struct qed_ptt *p_ptt,
646  		u32 cmd,
647  		u32 param,
648  		u32 *o_mcp_resp,
649  		u32 *o_mcp_param)
650  {
651  	return (_qed_mcp_cmd(p_hwfn, p_ptt, cmd, param,
652  			     o_mcp_resp, o_mcp_param, true));
653  }
654  
655  int qed_mcp_cmd_nosleep(struct qed_hwfn *p_hwfn,
656  			struct qed_ptt *p_ptt,
657  			u32 cmd,
658  			u32 param,
659  			u32 *o_mcp_resp,
660  			u32 *o_mcp_param)
661  {
662  	return (_qed_mcp_cmd(p_hwfn, p_ptt, cmd, param,
663  			     o_mcp_resp, o_mcp_param, false));
664  }
665  
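/* Issue a mailbox command whose payload is passed in the union data area
 * (used for NVM write transactions). Any such write invalidates the cached
 * nvm_info.
 */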
666  static int
667  qed_mcp_nvm_wr_cmd(struct qed_hwfn *p_hwfn,
668  		   struct qed_ptt *p_ptt,
669  		   u32 cmd,
670  		   u32 param,
671  		   u32 *o_mcp_resp,
672  		   u32 *o_mcp_param, u32 i_txn_size, u32 *i_buf)
673  {
674  	struct qed_mcp_mb_params mb_params;
675  	int rc;
676  
677  	memset(&mb_params, 0, sizeof(mb_params));
678  	mb_params.cmd = cmd;
679  	mb_params.param = param;
680  	mb_params.p_data_src = i_buf;
681  	mb_params.data_src_size = (u8)i_txn_size;
682  	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
683  	if (rc)
684  		return rc;
685  
686  	*o_mcp_resp = mb_params.mcp_resp;
687  	*o_mcp_param = mb_params.mcp_param;
688  
689  	/* nvm_info needs to be updated */
690  	p_hwfn->nvm_info.valid = false;
691  
692  	return 0;
693  }
694  
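/* Issue a mailbox command that returns a buffer in the union data area (used
 * for NVM read transactions). The actual transaction size is returned in the
 * mcp_param field of the response.
 */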
695  int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
696  		       struct qed_ptt *p_ptt,
697  		       u32 cmd,
698  		       u32 param,
699  		       u32 *o_mcp_resp,
700  		       u32 *o_mcp_param,
701  		       u32 *o_txn_size, u32 *o_buf, bool b_can_sleep)
702  {
703  	struct qed_mcp_mb_params mb_params;
704  	u8 raw_data[MCP_DRV_NVM_BUF_LEN];
705  	int rc;
706  
707  	memset(&mb_params, 0, sizeof(mb_params));
708  	mb_params.cmd = cmd;
709  	mb_params.param = param;
710  	mb_params.p_data_dst = raw_data;
711  
712  	/* Use the maximal value since the actual one is part of the response */
713  	mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;
714  	if (b_can_sleep)
715  		mb_params.flags = QED_MB_FLAG_CAN_SLEEP;
716  
717  	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
718  	if (rc)
719  		return rc;
720  
721  	*o_mcp_resp = mb_params.mcp_resp;
722  	*o_mcp_param = mb_params.mcp_param;
723  
724  	*o_txn_size = *o_mcp_param;
725  	memcpy(o_buf, raw_data, *o_txn_size);
726  
727  	return 0;
728  }
729  
730  static bool
731  qed_mcp_can_force_load(u8 drv_role,
732  		       u8 exist_drv_role,
733  		       enum qed_override_force_load override_force_load)
734  {
735  	bool can_force_load = false;
736  
737  	switch (override_force_load) {
738  	case QED_OVERRIDE_FORCE_LOAD_ALWAYS:
739  		can_force_load = true;
740  		break;
741  	case QED_OVERRIDE_FORCE_LOAD_NEVER:
742  		can_force_load = false;
743  		break;
744  	default:
745  		can_force_load = (drv_role == DRV_ROLE_OS &&
746  				  exist_drv_role == DRV_ROLE_PREBOOT) ||
747  				 (drv_role == DRV_ROLE_KDUMP &&
748  				  exist_drv_role == DRV_ROLE_OS);
749  		break;
750  	}
751  
752  	return can_force_load;
753  }
754  
755  static int qed_mcp_cancel_load_req(struct qed_hwfn *p_hwfn,
756  				   struct qed_ptt *p_ptt)
757  {
758  	u32 resp = 0, param = 0;
759  	int rc;
760  
761  	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
762  			 &resp, &param);
763  	if (rc)
764  		DP_NOTICE(p_hwfn,
765  			  "Failed to send cancel load request, rc = %d\n", rc);
766  
767  	return rc;
768  }
769  
770  #define BITMAP_IDX_FOR_CONFIG_QEDE	BIT(0)
771  #define BITMAP_IDX_FOR_CONFIG_QED_SRIOV	BIT(1)
772  #define BITMAP_IDX_FOR_CONFIG_QEDR	BIT(2)
773  #define BITMAP_IDX_FOR_CONFIG_QEDF	BIT(4)
774  #define BITMAP_IDX_FOR_CONFIG_QEDI	BIT(5)
775  #define BITMAP_IDX_FOR_CONFIG_QED_LL2	BIT(6)
776  
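/* Build a bitmap of the qed sub-modules compiled into this kernel; it is
 * reported to the MFW as drv_ver_1 in the load request.
 */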
777  static u32 qed_get_config_bitmap(void)
778  {
779  	u32 config_bitmap = 0x0;
780  
781  	if (IS_ENABLED(CONFIG_QEDE))
782  		config_bitmap |= BITMAP_IDX_FOR_CONFIG_QEDE;
783  
784  	if (IS_ENABLED(CONFIG_QED_SRIOV))
785  		config_bitmap |= BITMAP_IDX_FOR_CONFIG_QED_SRIOV;
786  
787  	if (IS_ENABLED(CONFIG_QED_RDMA))
788  		config_bitmap |= BITMAP_IDX_FOR_CONFIG_QEDR;
789  
790  	if (IS_ENABLED(CONFIG_QED_FCOE))
791  		config_bitmap |= BITMAP_IDX_FOR_CONFIG_QEDF;
792  
793  	if (IS_ENABLED(CONFIG_QED_ISCSI))
794  		config_bitmap |= BITMAP_IDX_FOR_CONFIG_QEDI;
795  
796  	if (IS_ENABLED(CONFIG_QED_LL2))
797  		config_bitmap |= BITMAP_IDX_FOR_CONFIG_QED_LL2;
798  
799  	return config_bitmap;
800  }
801  
802  struct qed_load_req_in_params {
803  	u8 hsi_ver;
804  #define QED_LOAD_REQ_HSI_VER_DEFAULT	0
805  #define QED_LOAD_REQ_HSI_VER_1		1
806  	u32 drv_ver_0;
807  	u32 drv_ver_1;
808  	u32 fw_ver;
809  	u8 drv_role;
810  	u8 timeout_val;
811  	u8 force_cmd;
812  	bool avoid_eng_reset;
813  };
814  
815  struct qed_load_req_out_params {
816  	u32 load_code;
817  	u32 exist_drv_ver_0;
818  	u32 exist_drv_ver_1;
819  	u32 exist_fw_ver;
820  	u8 exist_drv_role;
821  	u8 mfw_hsi_ver;
822  	bool drv_exists;
823  };
824  
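/* Send a single LOAD_REQ mailbox command built from p_in_params, and parse
 * the MFW response (load code, existing driver/FW versions, role and HSI
 * version) into p_out_params.
 */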
825  static int
826  __qed_mcp_load_req(struct qed_hwfn *p_hwfn,
827  		   struct qed_ptt *p_ptt,
828  		   struct qed_load_req_in_params *p_in_params,
829  		   struct qed_load_req_out_params *p_out_params)
830  {
831  	struct qed_mcp_mb_params mb_params;
832  	struct load_req_stc load_req;
833  	struct load_rsp_stc load_rsp;
834  	u32 hsi_ver;
835  	int rc;
836  
837  	memset(&load_req, 0, sizeof(load_req));
838  	load_req.drv_ver_0 = p_in_params->drv_ver_0;
839  	load_req.drv_ver_1 = p_in_params->drv_ver_1;
840  	load_req.fw_ver = p_in_params->fw_ver;
841  	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
842  	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
843  			  p_in_params->timeout_val);
844  	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE,
845  			  p_in_params->force_cmd);
846  	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
847  			  p_in_params->avoid_eng_reset);
848  
849  	hsi_ver = (p_in_params->hsi_ver == QED_LOAD_REQ_HSI_VER_DEFAULT) ?
850  		  DRV_ID_MCP_HSI_VER_CURRENT :
851  		  (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);
852  
853  	memset(&mb_params, 0, sizeof(mb_params));
854  	mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
855  	mb_params.param = PDA_COMP | hsi_ver | p_hwfn->cdev->drv_type;
856  	mb_params.p_data_src = &load_req;
857  	mb_params.data_src_size = sizeof(load_req);
858  	mb_params.p_data_dst = &load_rsp;
859  	mb_params.data_dst_size = sizeof(load_rsp);
860  	mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;
861  
862  	DP_VERBOSE(p_hwfn, QED_MSG_SP,
863  		   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
864  		   mb_params.param,
865  		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
866  		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
867  		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
868  		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));
869  
870  	if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1) {
871  		DP_VERBOSE(p_hwfn, QED_MSG_SP,
872  			   "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
873  			   load_req.drv_ver_0,
874  			   load_req.drv_ver_1,
875  			   load_req.fw_ver,
876  			   load_req.misc0,
877  			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE),
878  			   QED_MFW_GET_FIELD(load_req.misc0,
879  					     LOAD_REQ_LOCK_TO),
880  			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE),
881  			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));
882  	}
883  
884  	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
885  	if (rc) {
886  		DP_NOTICE(p_hwfn, "Failed to send load request, rc = %d\n", rc);
887  		return rc;
888  	}
889  
890  	DP_VERBOSE(p_hwfn, QED_MSG_SP,
891  		   "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
892  	p_out_params->load_code = mb_params.mcp_resp;
893  
894  	if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
895  	    p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
896  		DP_VERBOSE(p_hwfn,
897  			   QED_MSG_SP,
898  			   "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
899  			   load_rsp.drv_ver_0,
900  			   load_rsp.drv_ver_1,
901  			   load_rsp.fw_ver,
902  			   load_rsp.misc0,
903  			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
904  			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
905  			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));
906  
907  		p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
908  		p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
909  		p_out_params->exist_fw_ver = load_rsp.fw_ver;
910  		p_out_params->exist_drv_role =
911  		    QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
912  		p_out_params->mfw_hsi_ver =
913  		    QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
914  		p_out_params->drv_exists =
915  		    QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
916  		    LOAD_RSP_FLAGS0_DRV_EXISTS;
917  	}
918  
919  	return 0;
920  }
921  
922  static int eocre_get_mfw_drv_role(struct qed_hwfn *p_hwfn,
923  				  enum qed_drv_role drv_role,
924  				  u8 *p_mfw_drv_role)
925  {
926  	switch (drv_role) {
927  	case QED_DRV_ROLE_OS:
928  		*p_mfw_drv_role = DRV_ROLE_OS;
929  		break;
930  	case QED_DRV_ROLE_KDUMP:
931  		*p_mfw_drv_role = DRV_ROLE_KDUMP;
932  		break;
933  	default:
934  		DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
935  		return -EINVAL;
936  	}
937  
938  	return 0;
939  }
940  
941  enum qed_load_req_force {
942  	QED_LOAD_REQ_FORCE_NONE,
943  	QED_LOAD_REQ_FORCE_PF,
944  	QED_LOAD_REQ_FORCE_ALL,
945  };
946  
947  static void qed_get_mfw_force_cmd(struct qed_hwfn *p_hwfn,
948  				  enum qed_load_req_force force_cmd,
949  				  u8 *p_mfw_force_cmd)
950  {
951  	switch (force_cmd) {
952  	case QED_LOAD_REQ_FORCE_NONE:
953  		*p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
954  		break;
955  	case QED_LOAD_REQ_FORCE_PF:
956  		*p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
957  		break;
958  	case QED_LOAD_REQ_FORCE_ALL:
959  		*p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
960  		break;
961  	}
962  }
963  
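/* Negotiate the driver load with the MFW. Handles the cases where the MFW
 * requires the legacy HSI (version 1) or a force load, resending a revised
 * load request when needed.
 */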
964  int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
965  		     struct qed_ptt *p_ptt,
966  		     struct qed_load_req_params *p_params)
967  {
968  	struct qed_load_req_out_params out_params;
969  	struct qed_load_req_in_params in_params;
970  	u8 mfw_drv_role, mfw_force_cmd;
971  	int rc;
972  
973  	memset(&in_params, 0, sizeof(in_params));
974  	in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_DEFAULT;
975  	in_params.drv_ver_1 = qed_get_config_bitmap();
976  	in_params.fw_ver = STORM_FW_VERSION;
977  	rc = eocre_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
978  	if (rc)
979  		return rc;
980  
981  	in_params.drv_role = mfw_drv_role;
982  	in_params.timeout_val = p_params->timeout_val;
983  	qed_get_mfw_force_cmd(p_hwfn,
984  			      QED_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);
985  
986  	in_params.force_cmd = mfw_force_cmd;
987  	in_params.avoid_eng_reset = p_params->avoid_eng_reset;
988  
989  	memset(&out_params, 0, sizeof(out_params));
990  	rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
991  	if (rc)
992  		return rc;
993  
994  	/* First handle cases where another load request should/might be sent:
995  	 * - MFW expects the old interface [HSI version = 1]
996  	 * - MFW responds that a force load request is required
997  	 */
998  	if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
999  		DP_INFO(p_hwfn,
1000  			"MFW refused a load request due to HSI > 1. Resending with HSI = 1\n");
1001  
1002  		in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_1;
1003  		memset(&out_params, 0, sizeof(out_params));
1004  		rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
1005  		if (rc)
1006  			return rc;
1007  	} else if (out_params.load_code ==
1008  		   FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
1009  		if (qed_mcp_can_force_load(in_params.drv_role,
1010  					   out_params.exist_drv_role,
1011  					   p_params->override_force_load)) {
1012  			DP_INFO(p_hwfn,
1013  				"A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
1014  				in_params.drv_role, in_params.fw_ver,
1015  				in_params.drv_ver_0, in_params.drv_ver_1,
1016  				out_params.exist_drv_role,
1017  				out_params.exist_fw_ver,
1018  				out_params.exist_drv_ver_0,
1019  				out_params.exist_drv_ver_1);
1020  
1021  			qed_get_mfw_force_cmd(p_hwfn,
1022  					      QED_LOAD_REQ_FORCE_ALL,
1023  					      &mfw_force_cmd);
1024  
1025  			in_params.force_cmd = mfw_force_cmd;
1026  			memset(&out_params, 0, sizeof(out_params));
1027  			rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params,
1028  						&out_params);
1029  			if (rc)
1030  				return rc;
1031  		} else {
1032  			DP_NOTICE(p_hwfn,
1033  				  "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n",
1034  				  in_params.drv_role, in_params.fw_ver,
1035  				  in_params.drv_ver_0, in_params.drv_ver_1,
1036  				  out_params.exist_drv_role,
1037  				  out_params.exist_fw_ver,
1038  				  out_params.exist_drv_ver_0,
1039  				  out_params.exist_drv_ver_1);
1040  			DP_NOTICE(p_hwfn,
1041  				  "Avoid sending a force load request to prevent disruption of active PFs\n");
1042  
1043  			qed_mcp_cancel_load_req(p_hwfn, p_ptt);
1044  			return -EBUSY;
1045  		}
1046  	}
1047  
1048  	/* Now handle the other types of responses.
1049  	 * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
1050  	 * expected here after the additional revised load requests were sent.
1051  	 */
1052  	switch (out_params.load_code) {
1053  	case FW_MSG_CODE_DRV_LOAD_ENGINE:
1054  	case FW_MSG_CODE_DRV_LOAD_PORT:
1055  	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
1056  		if (out_params.mfw_hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
1057  		    out_params.drv_exists) {
1058  			/* The role and fw/driver version match, but the PF is
1059  			 * already loaded and has not been unloaded gracefully.
1060  			 */
1061  			DP_NOTICE(p_hwfn,
1062  				  "PF is already loaded\n");
1063  			return -EINVAL;
1064  		}
1065  		break;
1066  	default:
1067  		DP_NOTICE(p_hwfn,
1068  			  "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
1069  			  out_params.load_code);
1070  		return -EBUSY;
1071  	}
1072  
1073  	p_params->load_code = out_params.load_code;
1074  
1075  	return 0;
1076  }
1077  
1078  int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1079  {
1080  	u32 resp = 0, param = 0;
1081  	int rc;
1082  
1083  	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp,
1084  			 &param);
1085  	if (rc) {
1086  		DP_NOTICE(p_hwfn,
1087  			  "Failed to send a LOAD_DONE command, rc = %d\n", rc);
1088  		return rc;
1089  	}
1090  
1091  	/* Check if there is a DID mismatch between nvm-cfg/efuse */
1092  	if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
1093  		DP_NOTICE(p_hwfn,
1094  			  "warning: device configuration is not supported on this board type. The device may not function as expected.\n");
1095  
1096  	return 0;
1097  }
1098  
1099  #define MFW_COMPLETION_MAX_ITER 5000
1100  #define MFW_COMPLETION_INTERVAL_MS 1
1101  
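/* Send UNLOAD_REQ with the WoL configuration, then wait for any MFW event
 * that is currently being processed to complete; further events are bypassed
 * via QED_MCP_BYPASS_PROC_BIT.
 */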
1102  int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1103  {
1104  	struct qed_mcp_mb_params mb_params;
1105  	u32 cnt = MFW_COMPLETION_MAX_ITER;
1106  	u32 wol_param;
1107  	int rc;
1108  
1109  	switch (p_hwfn->cdev->wol_config) {
1110  	case QED_OV_WOL_DISABLED:
1111  		wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED;
1112  		break;
1113  	case QED_OV_WOL_ENABLED:
1114  		wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED;
1115  		break;
1116  	default:
1117  		DP_NOTICE(p_hwfn,
1118  			  "Unknown WoL configuration %02x\n",
1119  			  p_hwfn->cdev->wol_config);
1120  		fallthrough;
1121  	case QED_OV_WOL_DEFAULT:
1122  		wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
1123  	}
1124  
1125  	memset(&mb_params, 0, sizeof(mb_params));
1126  	mb_params.cmd = DRV_MSG_CODE_UNLOAD_REQ;
1127  	mb_params.param = wol_param;
1128  	mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;
1129  
1130  	spin_lock_bh(&p_hwfn->mcp_info->unload_lock);
1131  	set_bit(QED_MCP_BYPASS_PROC_BIT,
1132  		&p_hwfn->mcp_info->mcp_handling_status);
1133  	spin_unlock_bh(&p_hwfn->mcp_info->unload_lock);
1134  
1135  	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1136  
1137  	while (test_bit(QED_MCP_IN_PROCESSING_BIT,
1138  			&p_hwfn->mcp_info->mcp_handling_status) && --cnt)
1139  		msleep(MFW_COMPLETION_INTERVAL_MS);
1140  
1141  	if (!cnt)
1142  		DP_NOTICE(p_hwfn,
1143  			  "Failed to wait for MFW event completion after %d msec\n",
1144  			  MFW_COMPLETION_MAX_ITER * MFW_COMPLETION_INTERVAL_MS);
1145  
1146  	return rc;
1147  }
1148  
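/* Send UNLOAD_DONE; if WoL is enabled, the primary MAC is passed to the MFW
 * in the union data.
 */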
1149  int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1150  {
1151  	struct qed_mcp_mb_params mb_params;
1152  	struct mcp_mac wol_mac;
1153  
1154  	memset(&mb_params, 0, sizeof(mb_params));
1155  	mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;
1156  
1157  	/* Set the primary MAC if WoL is enabled */
1158  	if (p_hwfn->cdev->wol_config == QED_OV_WOL_ENABLED) {
1159  		u8 *p_mac = p_hwfn->cdev->wol_mac;
1160  
1161  		memset(&wol_mac, 0, sizeof(wol_mac));
1162  		wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
1163  		wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
1164  				    p_mac[4] << 8 | p_mac[5];
1165  
1166  		DP_VERBOSE(p_hwfn,
1167  			   (QED_MSG_SP | NETIF_MSG_IFDOWN),
1168  			   "Setting WoL MAC: %pM --> [%08x,%08x]\n",
1169  			   p_mac, wol_mac.mac_upper, wol_mac.mac_lower);
1170  
1171  		mb_params.p_data_src = &wol_mac;
1172  		mb_params.data_src_size = sizeof(wol_mac);
1173  	}
1174  
1175  	return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1176  }
1177  
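/* Handle an MFW notification of VFs that went through FLR: read the
 * mcp_vf_disabled bitmap from the public path section and schedule the IOV
 * FLR handling if any of this PF's VFs are marked.
 */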
1178  static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn,
1179  				  struct qed_ptt *p_ptt)
1180  {
1181  	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1182  					PUBLIC_PATH);
1183  	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
1184  	u32 path_addr = SECTION_ADDR(mfw_path_offsize,
1185  				     QED_PATH_ID(p_hwfn));
1186  	u32 disabled_vfs[VF_MAX_STATIC / 32];
1187  	int i;
1188  
1189  	DP_VERBOSE(p_hwfn,
1190  		   QED_MSG_SP,
1191  		   "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
1192  		   mfw_path_offsize, path_addr);
1193  
1194  	for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
1195  		disabled_vfs[i] = qed_rd(p_hwfn, p_ptt,
1196  					 path_addr +
1197  					 offsetof(struct public_path,
1198  						  mcp_vf_disabled) +
1199  					 sizeof(u32) * i);
1200  		DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
1201  			   "FLR-ed VFs [%08x,...,%08x] - %08x\n",
1202  			   i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
1203  	}
1204  
1205  	if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs))
1206  		qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
1207  }
1208  
1209  int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
1210  		       struct qed_ptt *p_ptt, u32 *vfs_to_ack)
1211  {
1212  	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1213  					PUBLIC_FUNC);
1214  	u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr);
1215  	u32 func_addr = SECTION_ADDR(mfw_func_offsize,
1216  				     MCP_PF_ID(p_hwfn));
1217  	struct qed_mcp_mb_params mb_params;
1218  	int rc;
1219  	int i;
1220  
1221  	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
1222  		DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
1223  			   "Acking VFs [%08x,...,%08x] - %08x\n",
1224  			   i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
1225  
1226  	memset(&mb_params, 0, sizeof(mb_params));
1227  	mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
1228  	mb_params.p_data_src = vfs_to_ack;
1229  	mb_params.data_src_size = VF_MAX_STATIC / 8;
1230  	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1231  	if (rc) {
1232  		DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n");
1233  		return -EBUSY;
1234  	}
1235  
1236  	/* Clear the ACK bits */
1237  	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
1238  		qed_wr(p_hwfn, p_ptt,
1239  		       func_addr +
1240  		       offsetof(struct public_func, drv_ack_vf_disabled) +
1241  		       i * sizeof(u32), 0);
1242  
1243  	return rc;
1244  }
1245  
1246  static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
1247  					      struct qed_ptt *p_ptt)
1248  {
1249  	u32 transceiver_state;
1250  
1251  	transceiver_state = qed_rd(p_hwfn, p_ptt,
1252  				   p_hwfn->mcp_info->port_addr +
1253  				   offsetof(struct public_port,
1254  					    transceiver_data));
1255  
1256  	DP_VERBOSE(p_hwfn,
1257  		   (NETIF_MSG_HW | QED_MSG_SP),
1258  		   "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
1259  		   transceiver_state,
1260  		   (u32)(p_hwfn->mcp_info->port_addr +
1261  			  offsetof(struct public_port, transceiver_data)));
1262  
1263  	transceiver_state = GET_FIELD(transceiver_state,
1264  				      ETH_TRANSCEIVER_STATE);
1265  
1266  	if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
1267  		DP_NOTICE(p_hwfn, "Transceiver is present.\n");
1268  	else
1269  		DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n");
1270  }
1271  
1272  static void qed_mcp_read_eee_config(struct qed_hwfn *p_hwfn,
1273  				    struct qed_ptt *p_ptt,
1274  				    struct qed_mcp_link_state *p_link)
1275  {
1276  	u32 eee_status, val;
1277  
1278  	p_link->eee_adv_caps = 0;
1279  	p_link->eee_lp_adv_caps = 0;
1280  	eee_status = qed_rd(p_hwfn,
1281  			    p_ptt,
1282  			    p_hwfn->mcp_info->port_addr +
1283  			    offsetof(struct public_port, eee_status));
1284  	p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
1285  	val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET;
1286  	if (val & EEE_1G_ADV)
1287  		p_link->eee_adv_caps |= QED_EEE_1G_ADV;
1288  	if (val & EEE_10G_ADV)
1289  		p_link->eee_adv_caps |= QED_EEE_10G_ADV;
1290  	val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET;
1291  	if (val & EEE_1G_ADV)
1292  		p_link->eee_lp_adv_caps |= QED_EEE_1G_ADV;
1293  	if (val & EEE_10G_ADV)
1294  		p_link->eee_lp_adv_caps |= QED_EEE_10G_ADV;
1295  }
1296  
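/* Copy the given PF's public_func section from SHMEM into p_data, bounded by
 * the section size reported by the MFW. Returns the copied size in bytes.
 */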
1297  static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
1298  				  struct qed_ptt *p_ptt,
1299  				  struct public_func *p_data, int pfid)
1300  {
1301  	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1302  					PUBLIC_FUNC);
1303  	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
1304  	u32 func_addr;
1305  	u32 i, size;
1306  
1307  	func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
1308  	memset(p_data, 0, sizeof(*p_data));
1309  
1310  	size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize));
1311  	for (i = 0; i < size / sizeof(u32); i++)
1312  		((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
1313  					    func_addr + (i << 2));
1314  	return size;
1315  }
1316  
1317  static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
1318  				  struct public_func *p_shmem_info)
1319  {
1320  	struct qed_mcp_function_info *p_info;
1321  
1322  	p_info = &p_hwfn->mcp_info->func_info;
1323  
1324  	p_info->bandwidth_min = QED_MFW_GET_FIELD(p_shmem_info->config,
1325  						  FUNC_MF_CFG_MIN_BW);
1326  	if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
1327  		DP_INFO(p_hwfn,
1328  			"bandwidth minimum out of bounds [%02x]. Set to 1\n",
1329  			p_info->bandwidth_min);
1330  		p_info->bandwidth_min = 1;
1331  	}
1332  
1333  	p_info->bandwidth_max = QED_MFW_GET_FIELD(p_shmem_info->config,
1334  						  FUNC_MF_CFG_MAX_BW);
1335  	if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
1336  		DP_INFO(p_hwfn,
1337  			"bandwidth maximum out of bounds [%02x]. Set to 100\n",
1338  			p_info->bandwidth_max);
1339  		p_info->bandwidth_max = 100;
1340  	}
1341  }
1342  
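/* Process a link-change notification (or reset the link indications): read
 * the link status from SHMEM, translate it into qed_mcp_link_state (speed,
 * duplex, partner abilities, EEE, FEC), re-apply the bandwidth configuration
 * and notify the rest of the driver via qed_link_update().
 */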
1343  static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
1344  				       struct qed_ptt *p_ptt, bool b_reset)
1345  {
1346  	struct qed_mcp_link_state *p_link;
1347  	u8 max_bw, min_bw;
1348  	u32 status = 0;
1349  
1350  	/* Prevent SW/attentions from doing this at the same time */
1351  	spin_lock_bh(&p_hwfn->mcp_info->link_lock);
1352  
1353  	p_link = &p_hwfn->mcp_info->link_output;
1354  	memset(p_link, 0, sizeof(*p_link));
1355  	if (!b_reset) {
1356  		status = qed_rd(p_hwfn, p_ptt,
1357  				p_hwfn->mcp_info->port_addr +
1358  				offsetof(struct public_port, link_status));
1359  		DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP),
1360  			   "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
1361  			   status,
1362  			   (u32)(p_hwfn->mcp_info->port_addr +
1363  				 offsetof(struct public_port, link_status)));
1364  	} else {
1365  		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1366  			   "Resetting link indications\n");
1367  		goto out;
1368  	}
1369  
1370  	if (p_hwfn->b_drv_link_init) {
1371  		/* Link indication with modern MFW arrives as per-PF
1372  		 * indication.
1373  		 */
1374  		if (p_hwfn->mcp_info->capabilities &
1375  		    FW_MB_PARAM_FEATURE_SUPPORT_VLINK) {
1376  			struct public_func shmem_info;
1377  
1378  			qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
1379  					       MCP_PF_ID(p_hwfn));
1380  			p_link->link_up = !!(shmem_info.status &
1381  					     FUNC_STATUS_VIRTUAL_LINK_UP);
1382  			qed_read_pf_bandwidth(p_hwfn, &shmem_info);
1383  			DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1384  				   "Virtual link_up = %d\n", p_link->link_up);
1385  		} else {
1386  			p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
1387  			DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1388  				   "Physical link_up = %d\n", p_link->link_up);
1389  		}
1390  	} else {
1391  		p_link->link_up = false;
1392  	}
1393  
1394  	p_link->full_duplex = true;
1395  	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
1396  	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
1397  		p_link->speed = 100000;
1398  		break;
1399  	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
1400  		p_link->speed = 50000;
1401  		break;
1402  	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
1403  		p_link->speed = 40000;
1404  		break;
1405  	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
1406  		p_link->speed = 25000;
1407  		break;
1408  	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
1409  		p_link->speed = 20000;
1410  		break;
1411  	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
1412  		p_link->speed = 10000;
1413  		break;
1414  	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
1415  		p_link->full_duplex = false;
1416  		fallthrough;
1417  	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
1418  		p_link->speed = 1000;
1419  		break;
1420  	default:
1421  		p_link->speed = 0;
1422  		p_link->link_up = 0;
1423  	}
1424  
1425  	if (p_link->link_up && p_link->speed)
1426  		p_link->line_speed = p_link->speed;
1427  	else
1428  		p_link->line_speed = 0;
1429  
1430  	max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
1431  	min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;
1432  
1433  	/* Max bandwidth configuration */
1434  	__qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw);
1435  
1436  	/* Min bandwidth configuration */
1437  	__qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
1438  	qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_ptt,
1439  					    p_link->min_pf_rate);
1440  
1441  	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
1442  	p_link->an_complete = !!(status &
1443  				 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
1444  	p_link->parallel_detection = !!(status &
1445  					LINK_STATUS_PARALLEL_DETECTION_USED);
1446  	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);
1447  
1448  	p_link->partner_adv_speed |=
1449  		(status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
1450  		QED_LINK_PARTNER_SPEED_1G_FD : 0;
1451  	p_link->partner_adv_speed |=
1452  		(status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
1453  		QED_LINK_PARTNER_SPEED_1G_HD : 0;
1454  	p_link->partner_adv_speed |=
1455  		(status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
1456  		QED_LINK_PARTNER_SPEED_10G : 0;
1457  	p_link->partner_adv_speed |=
1458  		(status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
1459  		QED_LINK_PARTNER_SPEED_20G : 0;
1460  	p_link->partner_adv_speed |=
1461  		(status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
1462  		QED_LINK_PARTNER_SPEED_25G : 0;
1463  	p_link->partner_adv_speed |=
1464  		(status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
1465  		QED_LINK_PARTNER_SPEED_40G : 0;
1466  	p_link->partner_adv_speed |=
1467  		(status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
1468  		QED_LINK_PARTNER_SPEED_50G : 0;
1469  	p_link->partner_adv_speed |=
1470  		(status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
1471  		QED_LINK_PARTNER_SPEED_100G : 0;
1472  
1473  	p_link->partner_tx_flow_ctrl_en =
1474  		!!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
1475  	p_link->partner_rx_flow_ctrl_en =
1476  		!!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
1477  
1478  	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
1479  	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
1480  		p_link->partner_adv_pause = QED_LINK_PARTNER_SYMMETRIC_PAUSE;
1481  		break;
1482  	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
1483  		p_link->partner_adv_pause = QED_LINK_PARTNER_ASYMMETRIC_PAUSE;
1484  		break;
1485  	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
1486  		p_link->partner_adv_pause = QED_LINK_PARTNER_BOTH_PAUSE;
1487  		break;
1488  	default:
1489  		p_link->partner_adv_pause = 0;
1490  	}
1491  
1492  	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);
1493  
1494  	if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
1495  		qed_mcp_read_eee_config(p_hwfn, p_ptt, p_link);
1496  
1497  	if (p_hwfn->mcp_info->capabilities &
1498  	    FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL) {
1499  		switch (status & LINK_STATUS_FEC_MODE_MASK) {
1500  		case LINK_STATUS_FEC_MODE_NONE:
1501  			p_link->fec_active = QED_FEC_MODE_NONE;
1502  			break;
1503  		case LINK_STATUS_FEC_MODE_FIRECODE_CL74:
1504  			p_link->fec_active = QED_FEC_MODE_FIRECODE;
1505  			break;
1506  		case LINK_STATUS_FEC_MODE_RS_CL91:
1507  			p_link->fec_active = QED_FEC_MODE_RS;
1508  			break;
1509  		default:
1510  			p_link->fec_active = QED_FEC_MODE_AUTO;
1511  		}
1512  	} else {
1513  		p_link->fec_active = QED_FEC_MODE_UNSUPPORTED;
1514  	}
1515  
1516  	qed_link_update(p_hwfn, p_ptt);
1517  out:
1518  	spin_unlock_bh(&p_hwfn->mcp_info->link_lock);
1519  }
1520  
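/* Build an eth_phy_cfg from the cached link_input parameters (speed, pause,
 * EEE, FEC, extended speeds) and send DRV_MSG_CODE_INIT_PHY or
 * DRV_MSG_CODE_LINK_RESET, depending on b_up.
 */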
1521  int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
1522  {
1523  	struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
1524  	struct qed_mcp_mb_params mb_params;
1525  	struct eth_phy_cfg phy_cfg;
1526  	u32 cmd, fec_bit = 0;
1527  	u32 val, ext_speed;
1528  	int rc = 0;
1529  
1530  	/* Set the shmem configuration according to params */
1531  	memset(&phy_cfg, 0, sizeof(phy_cfg));
1532  	cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
1533  	if (!params->speed.autoneg)
1534  		phy_cfg.speed = params->speed.forced_speed;
1535  	phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
1536  	phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
1537  	phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
1538  	phy_cfg.adv_speed = params->speed.advertised_speeds;
1539  	phy_cfg.loopback_mode = params->loopback_mode;
1540  
1541  	/* There are MFWs that share this capability regardless of whether
1542  	 * this is feasible or not. And given that at the very least adv_caps
1543  	 * would be set internally by qed, we want to make sure LFA would
1544  	 * still work.
1545  	 */
1546  	if ((p_hwfn->mcp_info->capabilities &
1547  	     FW_MB_PARAM_FEATURE_SUPPORT_EEE) && params->eee.enable) {
1548  		phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
1549  		if (params->eee.tx_lpi_enable)
1550  			phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
1551  		if (params->eee.adv_caps & QED_EEE_1G_ADV)
1552  			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
1553  		if (params->eee.adv_caps & QED_EEE_10G_ADV)
1554  			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
1555  		phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
1556  				    EEE_TX_TIMER_USEC_OFFSET) &
1557  				   EEE_TX_TIMER_USEC_MASK;
1558  	}
1559  
1560  	if (p_hwfn->mcp_info->capabilities &
1561  	    FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL) {
1562  		if (params->fec & QED_FEC_MODE_NONE)
1563  			fec_bit |= FEC_FORCE_MODE_NONE;
1564  		else if (params->fec & QED_FEC_MODE_FIRECODE)
1565  			fec_bit |= FEC_FORCE_MODE_FIRECODE;
1566  		else if (params->fec & QED_FEC_MODE_RS)
1567  			fec_bit |= FEC_FORCE_MODE_RS;
1568  		else if (params->fec & QED_FEC_MODE_AUTO)
1569  			fec_bit |= FEC_FORCE_MODE_AUTO;
1570  
1571  		SET_MFW_FIELD(phy_cfg.fec_mode, FEC_FORCE_MODE, fec_bit);
1572  	}
1573  
1574  	if (p_hwfn->mcp_info->capabilities &
1575  	    FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL) {
1576  		ext_speed = 0;
1577  		if (params->ext_speed.autoneg)
1578  			ext_speed |= ETH_EXT_SPEED_NONE;
1579  
1580  		val = params->ext_speed.forced_speed;
1581  		if (val & QED_EXT_SPEED_1G)
1582  			ext_speed |= ETH_EXT_SPEED_1G;
1583  		if (val & QED_EXT_SPEED_10G)
1584  			ext_speed |= ETH_EXT_SPEED_10G;
1585  		if (val & QED_EXT_SPEED_25G)
1586  			ext_speed |= ETH_EXT_SPEED_25G;
1587  		if (val & QED_EXT_SPEED_40G)
1588  			ext_speed |= ETH_EXT_SPEED_40G;
1589  		if (val & QED_EXT_SPEED_50G_R)
1590  			ext_speed |= ETH_EXT_SPEED_50G_BASE_R;
1591  		if (val & QED_EXT_SPEED_50G_R2)
1592  			ext_speed |= ETH_EXT_SPEED_50G_BASE_R2;
1593  		if (val & QED_EXT_SPEED_100G_R2)
1594  			ext_speed |= ETH_EXT_SPEED_100G_BASE_R2;
1595  		if (val & QED_EXT_SPEED_100G_R4)
1596  			ext_speed |= ETH_EXT_SPEED_100G_BASE_R4;
1597  		if (val & QED_EXT_SPEED_100G_P4)
1598  			ext_speed |= ETH_EXT_SPEED_100G_BASE_P4;
1599  
1600  		SET_MFW_FIELD(phy_cfg.extended_speed, ETH_EXT_SPEED,
1601  			      ext_speed);
1602  
1603  		ext_speed = 0;
1604  
1605  		val = params->ext_speed.advertised_speeds;
1606  		if (val & QED_EXT_SPEED_MASK_1G)
1607  			ext_speed |= ETH_EXT_ADV_SPEED_1G;
1608  		if (val & QED_EXT_SPEED_MASK_10G)
1609  			ext_speed |= ETH_EXT_ADV_SPEED_10G;
1610  		if (val & QED_EXT_SPEED_MASK_25G)
1611  			ext_speed |= ETH_EXT_ADV_SPEED_25G;
1612  		if (val & QED_EXT_SPEED_MASK_40G)
1613  			ext_speed |= ETH_EXT_ADV_SPEED_40G;
1614  		if (val & QED_EXT_SPEED_MASK_50G_R)
1615  			ext_speed |= ETH_EXT_ADV_SPEED_50G_BASE_R;
1616  		if (val & QED_EXT_SPEED_MASK_50G_R2)
1617  			ext_speed |= ETH_EXT_ADV_SPEED_50G_BASE_R2;
1618  		if (val & QED_EXT_SPEED_MASK_100G_R2)
1619  			ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_R2;
1620  		if (val & QED_EXT_SPEED_MASK_100G_R4)
1621  			ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_R4;
1622  		if (val & QED_EXT_SPEED_MASK_100G_P4)
1623  			ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_P4;
1624  
1625  		phy_cfg.extended_speed |= ext_speed;
1626  
1627  		SET_MFW_FIELD(phy_cfg.fec_mode, FEC_EXTENDED_MODE,
1628  			      params->ext_fec_mode);
1629  	}
1630  
1631  	p_hwfn->b_drv_link_init = b_up;
1632  
1633  	if (b_up) {
1634  		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1635  			   "Configuring Link: Speed 0x%08x, Pause 0x%08x, Adv. Speed 0x%08x, Loopback 0x%08x, FEC 0x%08x, Ext. Speed 0x%08x\n",
1636  			   phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
1637  			   phy_cfg.loopback_mode, phy_cfg.fec_mode,
1638  			   phy_cfg.extended_speed);
1639  	} else {
1640  		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "Resetting link\n");
1641  	}
1642  
1643  	memset(&mb_params, 0, sizeof(mb_params));
1644  	mb_params.cmd = cmd;
1645  	mb_params.p_data_src = &phy_cfg;
1646  	mb_params.data_src_size = sizeof(phy_cfg);
1647  	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1648  
1649  	/* If the MCP fails to respond we must abort */
1650  	if (rc) {
1651  		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
1652  		return rc;
1653  	}
1654  
1655  	/* Mimic link-change attention, done for several reasons:
1656  	 *  - On reset, there's no guarantee MFW would trigger
1657  	 *    an attention.
1658  	 *  - On initialization, older MFWs might not indicate link change
1659  	 *    during LFA, so we'll never get an UP indication.
1660  	 */
1661  	qed_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);
1662  
1663  	return 0;
1664  }
1665  
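/* Illustrative usage sketch (not driver code): a caller would typically tweak
 * the shared link_input returned by qed_mcp_get_link_params() and push it to
 * the MFW via qed_mcp_set_link(). The forced speed value is an example only;
 * error handling is omitted.
 */
#if 0
static int example_force_25g(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_link_params *params = qed_mcp_get_link_params(p_hwfn);

	if (!params)
		return -EINVAL;

	params->speed.autoneg = false;
	params->speed.forced_speed = 25000;	/* Mb/s, example value */

	return qed_mcp_set_link(p_hwfn, p_ptt, true);
}
#endif
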
1666  u32 qed_get_process_kill_counter(struct qed_hwfn *p_hwfn,
1667  				 struct qed_ptt *p_ptt)
1668  {
1669  	u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;
1670  
1671  	if (IS_VF(p_hwfn->cdev))
1672  		return -EINVAL;
1673  
1674  	path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1675  						 PUBLIC_PATH);
1676  	path_offsize = qed_rd(p_hwfn, p_ptt, path_offsize_addr);
1677  	path_addr = SECTION_ADDR(path_offsize, QED_PATH_ID(p_hwfn));
1678  
1679  	proc_kill_cnt = qed_rd(p_hwfn, p_ptt,
1680  			       path_addr +
1681  			       offsetof(struct public_path, process_kill)) &
1682  			PROCESS_KILL_COUNTER_MASK;
1683  
1684  	return proc_kill_cnt;
1685  }
1686  
1687  static void qed_mcp_handle_process_kill(struct qed_hwfn *p_hwfn,
1688  					struct qed_ptt *p_ptt)
1689  {
1690  	struct qed_dev *cdev = p_hwfn->cdev;
1691  	u32 proc_kill_cnt;
1692  
1693  	/* Prevent possible attentions/interrupts during the recovery handling
1694  	 * and until its load phase, during which they will be re-enabled.
1695  	 */
1696  	qed_int_igu_disable_int(p_hwfn, p_ptt);
1697  
1698  	DP_NOTICE(p_hwfn, "Received a process kill indication\n");
1699  
1700  	/* The following operations should be done once, and thus in CMT mode
1701  	 * are carried out by only the first HW function.
1702  	 */
1703  	if (p_hwfn != QED_LEADING_HWFN(cdev))
1704  		return;
1705  
1706  	if (cdev->recov_in_prog) {
1707  		DP_NOTICE(p_hwfn,
1708  			  "Ignoring the indication since a recovery process is already in progress\n");
1709  		return;
1710  	}
1711  
1712  	cdev->recov_in_prog = true;
1713  
1714  	proc_kill_cnt = qed_get_process_kill_counter(p_hwfn, p_ptt);
1715  	DP_NOTICE(p_hwfn, "Process kill counter: %d\n", proc_kill_cnt);
1716  
1717  	qed_schedule_recovery_handler(p_hwfn);
1718  }
1719  
1720  static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
1721  					struct qed_ptt *p_ptt,
1722  					enum MFW_DRV_MSG_TYPE type)
1723  {
1724  	enum qed_mcp_protocol_type stats_type;
1725  	union qed_mcp_protocol_stats stats;
1726  	struct qed_mcp_mb_params mb_params;
1727  	u32 hsi_param;
1728  
1729  	switch (type) {
1730  	case MFW_DRV_MSG_GET_LAN_STATS:
1731  		stats_type = QED_MCP_LAN_STATS;
1732  		hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
1733  		break;
1734  	case MFW_DRV_MSG_GET_FCOE_STATS:
1735  		stats_type = QED_MCP_FCOE_STATS;
1736  		hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE;
1737  		break;
1738  	case MFW_DRV_MSG_GET_ISCSI_STATS:
1739  		stats_type = QED_MCP_ISCSI_STATS;
1740  		hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI;
1741  		break;
1742  	case MFW_DRV_MSG_GET_RDMA_STATS:
1743  		stats_type = QED_MCP_RDMA_STATS;
1744  		hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA;
1745  		break;
1746  	default:
1747  		DP_NOTICE(p_hwfn, "Invalid protocol type %d\n", type);
1748  		return;
1749  	}
1750  
1751  	qed_get_protocol_stats(p_hwfn->cdev, stats_type, &stats);
1752  
1753  	memset(&mb_params, 0, sizeof(mb_params));
1754  	mb_params.cmd = DRV_MSG_CODE_GET_STATS;
1755  	mb_params.param = hsi_param;
1756  	mb_params.p_data_src = &stats;
1757  	mb_params.data_src_size = sizeof(stats);
1758  	qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1759  }
1760  
1761  static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1762  {
1763  	struct qed_mcp_function_info *p_info;
1764  	struct public_func shmem_info;
1765  	u32 resp = 0, param = 0;
1766  
1767  	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1768  
1769  	qed_read_pf_bandwidth(p_hwfn, &shmem_info);
1770  
1771  	p_info = &p_hwfn->mcp_info->func_info;
1772  
1773  	qed_configure_pf_min_bandwidth(p_hwfn->cdev, p_info->bandwidth_min);
1774  	qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max);
1775  
1776  	/* Acknowledge the MFW */
1777  	qed_mcp_cmd_nosleep(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
1778  			    &param);
1779  }
1780  
1781  static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1782  {
1783  	struct public_func shmem_info;
1784  	u32 resp = 0, param = 0;
1785  
1786  	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1787  
1788  	p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
1789  						 FUNC_MF_CFG_OV_STAG_MASK;
1790  	p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
1791  	if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits)) {
1792  		if (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET) {
1793  			qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE,
1794  			       p_hwfn->hw_info.ovlan);
1795  			qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 1);
1796  
1797  			/* Configure DB to add external vlan to EDPM packets */
1798  			qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
1799  			qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2,
1800  			       p_hwfn->hw_info.ovlan);
1801  		} else {
1802  			qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 0);
1803  			qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, 0);
1804  			qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 0);
1805  			qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, 0);
1806  		}
1807  
1808  		qed_sp_pf_update_stag(p_hwfn);
1809  	}
1810  
1811  	DP_VERBOSE(p_hwfn, QED_MSG_SP, "ovlan = %d hw_mode = 0x%x\n",
1812  		   p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode);
1813  
1814  	/* Acknowledge the MFW */
1815  	qed_mcp_cmd_nosleep(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
1816  			    &resp, &param);
1817  }
1818  
1819  static void qed_mcp_handle_fan_failure(struct qed_hwfn *p_hwfn,
1820  				       struct qed_ptt *p_ptt)
1821  {
1822  	/* A single notification should be sent to the upper driver in CMT mode */
1823  	if (p_hwfn != QED_LEADING_HWFN(p_hwfn->cdev))
1824  		return;
1825  
1826  	qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_FAN_FAIL,
1827  			  "Fan failure was detected on the network interface card and it's going to be shut down.\n");
1828  }
1829  
1830  struct qed_mdump_cmd_params {
1831  	u32 cmd;
1832  	void *p_data_src;
1833  	u8 data_src_size;
1834  	void *p_data_dst;
1835  	u8 data_dst_size;
1836  	u32 mcp_resp;
1837  };
1838  
1839  static int
1840  qed_mcp_mdump_cmd(struct qed_hwfn *p_hwfn,
1841  		  struct qed_ptt *p_ptt,
1842  		  struct qed_mdump_cmd_params *p_mdump_cmd_params)
1843  {
1844  	struct qed_mcp_mb_params mb_params;
1845  	int rc;
1846  
1847  	memset(&mb_params, 0, sizeof(mb_params));
1848  	mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
1849  	mb_params.param = p_mdump_cmd_params->cmd;
1850  	mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
1851  	mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
1852  	mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
1853  	mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
1854  	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1855  	if (rc)
1856  		return rc;
1857  
1858  	p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;
1859  
1860  	if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
1861  		DP_INFO(p_hwfn,
1862  			"The mdump sub-command is not supported by the MFW [mdump_cmd 0x%x]\n",
1863  			p_mdump_cmd_params->cmd);
1864  		rc = -EOPNOTSUPP;
1865  	} else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
1866  		DP_INFO(p_hwfn,
1867  			"The mdump command is not supported by the MFW\n");
1868  		rc = -EOPNOTSUPP;
1869  	}
1870  
1871  	return rc;
1872  }
1873  
1874  static int qed_mcp_mdump_ack(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1875  {
1876  	struct qed_mdump_cmd_params mdump_cmd_params;
1877  
1878  	memset(&mdump_cmd_params, 0, sizeof(mdump_cmd_params));
1879  	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;
1880  
1881  	return qed_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1882  }
1883  
1884  int
1885  qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn,
1886  			 struct qed_ptt *p_ptt,
1887  			 struct mdump_retain_data_stc *p_mdump_retain)
1888  {
1889  	struct qed_mdump_cmd_params mdump_cmd_params;
1890  	int rc;
1891  
1892  	memset(&mdump_cmd_params, 0, sizeof(mdump_cmd_params));
1893  	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN;
1894  	mdump_cmd_params.p_data_dst = p_mdump_retain;
1895  	mdump_cmd_params.data_dst_size = sizeof(*p_mdump_retain);
1896  
1897  	rc = qed_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1898  	if (rc)
1899  		return rc;
1900  
1901  	if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
1902  		DP_INFO(p_hwfn,
1903  			"Failed to get the mdump retained data [mcp_resp 0x%x]\n",
1904  			mdump_cmd_params.mcp_resp);
1905  		return -EINVAL;
1906  	}
1907  
1908  	return 0;
1909  }
1910  
1911  static void qed_mcp_handle_critical_error(struct qed_hwfn *p_hwfn,
1912  					  struct qed_ptt *p_ptt)
1913  {
1914  	struct mdump_retain_data_stc mdump_retain;
1915  	int rc;
1916  
1917  	/* In CMT mode - no need for more than a single acknowledgment to the
1918  	 * MFW, and no more than a single notification to the upper driver.
1919  	 */
1920  	if (p_hwfn != QED_LEADING_HWFN(p_hwfn->cdev))
1921  		return;
1922  
1923  	rc = qed_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain);
1924  	if (rc == 0 && mdump_retain.valid)
1925  		DP_NOTICE(p_hwfn,
1926  			  "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n",
1927  			  mdump_retain.epoch,
1928  			  mdump_retain.pf, mdump_retain.status);
1929  	else
1930  		DP_NOTICE(p_hwfn,
1931  			  "The MFW notified that a critical error occurred in the device\n");
1932  
1933  	DP_NOTICE(p_hwfn,
1934  		  "Acknowledging the notification to prevent the MFW from taking a crash dump [driver debug data collection is preferable]\n");
1935  	qed_mcp_mdump_ack(p_hwfn, p_ptt);
1936  
1937  	qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_HW_ATTN, NULL);
1938  }
1939  
1940  void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1941  {
1942  	struct public_func shmem_info;
1943  	u32 port_cfg, val;
1944  
1945  	if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
1946  		return;
1947  
1948  	memset(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info));
1949  	port_cfg = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
1950  			  offsetof(struct public_port, oem_cfg_port));
1951  	val = (port_cfg & OEM_CFG_CHANNEL_TYPE_MASK) >>
1952  		OEM_CFG_CHANNEL_TYPE_OFFSET;
1953  	if (val != OEM_CFG_CHANNEL_TYPE_STAGGED)
1954  		DP_NOTICE(p_hwfn,
1955  			  "Incorrect UFP Channel type %d port_id 0x%02x\n",
1956  			  val, MFW_PORT(p_hwfn));
1957  
1958  	val = (port_cfg & OEM_CFG_SCHED_TYPE_MASK) >> OEM_CFG_SCHED_TYPE_OFFSET;
1959  	if (val == OEM_CFG_SCHED_TYPE_ETS) {
1960  		p_hwfn->ufp_info.mode = QED_UFP_MODE_ETS;
1961  	} else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW) {
1962  		p_hwfn->ufp_info.mode = QED_UFP_MODE_VNIC_BW;
1963  	} else {
1964  		p_hwfn->ufp_info.mode = QED_UFP_MODE_UNKNOWN;
1965  		DP_NOTICE(p_hwfn,
1966  			  "Unknown UFP scheduling mode %d port_id 0x%02x\n",
1967  			  val, MFW_PORT(p_hwfn));
1968  	}
1969  
1970  	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1971  	val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_TC_MASK) >>
1972  		OEM_CFG_FUNC_TC_OFFSET;
1973  	p_hwfn->ufp_info.tc = (u8)val;
1974  	val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_HOST_PRI_CTRL_MASK) >>
1975  		OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET;
1976  	if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC) {
1977  		p_hwfn->ufp_info.pri_type = QED_UFP_PRI_VNIC;
1978  	} else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS) {
1979  		p_hwfn->ufp_info.pri_type = QED_UFP_PRI_OS;
1980  	} else {
1981  		p_hwfn->ufp_info.pri_type = QED_UFP_PRI_UNKNOWN;
1982  		DP_NOTICE(p_hwfn,
1983  			  "Unknown Host priority control %d port_id 0x%02x\n",
1984  			  val, MFW_PORT(p_hwfn));
1985  	}
1986  
1987  	DP_NOTICE(p_hwfn,
1988  		  "UFP shmem config: mode = %d tc = %d pri_type = %d port_id 0x%02x\n",
1989  		  p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc,
1990  		  p_hwfn->ufp_info.pri_type, MFW_PORT(p_hwfn));
1991  }
1992  
1993  static int
1994  qed_mcp_handle_ufp_event(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1995  {
1996  	qed_mcp_read_ufp_config(p_hwfn, p_ptt);
1997  
1998  	if (p_hwfn->ufp_info.mode == QED_UFP_MODE_VNIC_BW) {
1999  		p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc;
2000  		qed_hw_info_set_offload_tc(&p_hwfn->hw_info,
2001  					   p_hwfn->ufp_info.tc);
2002  
2003  		qed_qm_reconf(p_hwfn, p_ptt);
2004  	} else if (p_hwfn->ufp_info.mode == QED_UFP_MODE_ETS) {
2005  		/* Merge UFP TC with the dcbx TC data */
2006  		qed_dcbx_mib_update_event(p_hwfn, p_ptt,
2007  					  QED_DCBX_OPERATIONAL_MIB);
2008  	} else {
2009  		DP_ERR(p_hwfn, "Invalid sched type, discard the UFP config\n");
2010  		return -EINVAL;
2011  	}
2012  
2013  	/* update storm FW with negotiation results */
2014  	qed_sp_pf_update_ufp(p_hwfn);
2015  
2016  	/* update stag pcp value */
2017  	qed_sp_pf_update_stag(p_hwfn);
2018  
2019  	return 0;
2020  }
2021  
2022  int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
2023  			  struct qed_ptt *p_ptt)
2024  {
2025  	struct qed_mcp_info *info = p_hwfn->mcp_info;
2026  	int rc = 0;
2027  	bool found = false;
2028  	u16 i;
2029  
2030  	DP_VERBOSE(p_hwfn, QED_MSG_SP, "Received message from MFW\n");
2031  
2032  	/* Read Messages from MFW */
2033  	qed_mcp_read_mb(p_hwfn, p_ptt);
2034  
2035  	/* Compare current messages to old ones */
2036  	for (i = 0; i < info->mfw_mb_length; i++) {
2037  		if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
2038  			continue;
2039  
2040  		found = true;
2041  
2042  		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
2043  			   "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
2044  			   i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);
2045  
2046  		spin_lock_bh(&p_hwfn->mcp_info->unload_lock);
2047  		if (test_bit(QED_MCP_BYPASS_PROC_BIT,
2048  			     &p_hwfn->mcp_info->mcp_handling_status)) {
2049  			spin_unlock_bh(&p_hwfn->mcp_info->unload_lock);
2050  			DP_INFO(p_hwfn,
2051  				"Msg [%d] is bypassed on unload flow\n", i);
2052  			continue;
2053  		}
2054  
2055  		set_bit(QED_MCP_IN_PROCESSING_BIT,
2056  			&p_hwfn->mcp_info->mcp_handling_status);
2057  		spin_unlock_bh(&p_hwfn->mcp_info->unload_lock);
2058  
2059  		switch (i) {
2060  		case MFW_DRV_MSG_LINK_CHANGE:
2061  			qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
2062  			break;
2063  		case MFW_DRV_MSG_VF_DISABLED:
2064  			qed_mcp_handle_vf_flr(p_hwfn, p_ptt);
2065  			break;
2066  		case MFW_DRV_MSG_LLDP_DATA_UPDATED:
2067  			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
2068  						  QED_DCBX_REMOTE_LLDP_MIB);
2069  			break;
2070  		case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
2071  			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
2072  						  QED_DCBX_REMOTE_MIB);
2073  			break;
2074  		case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
2075  			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
2076  						  QED_DCBX_OPERATIONAL_MIB);
2077  			break;
2078  		case MFW_DRV_MSG_OEM_CFG_UPDATE:
2079  			qed_mcp_handle_ufp_event(p_hwfn, p_ptt);
2080  			break;
2081  		case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
2082  			qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
2083  			break;
2084  		case MFW_DRV_MSG_ERROR_RECOVERY:
2085  			qed_mcp_handle_process_kill(p_hwfn, p_ptt);
2086  			break;
2087  		case MFW_DRV_MSG_GET_LAN_STATS:
2088  		case MFW_DRV_MSG_GET_FCOE_STATS:
2089  		case MFW_DRV_MSG_GET_ISCSI_STATS:
2090  		case MFW_DRV_MSG_GET_RDMA_STATS:
2091  			qed_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
2092  			break;
2093  		case MFW_DRV_MSG_BW_UPDATE:
2094  			qed_mcp_update_bw(p_hwfn, p_ptt);
2095  			break;
2096  		case MFW_DRV_MSG_S_TAG_UPDATE:
2097  			qed_mcp_update_stag(p_hwfn, p_ptt);
2098  			break;
2099  		case MFW_DRV_MSG_FAILURE_DETECTED:
2100  			qed_mcp_handle_fan_failure(p_hwfn, p_ptt);
2101  			break;
2102  		case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
2103  			qed_mcp_handle_critical_error(p_hwfn, p_ptt);
2104  			break;
2105  		case MFW_DRV_MSG_GET_TLV_REQ:
2106  			qed_mfw_tlv_req(p_hwfn);
2107  			break;
2108  		default:
2109  			DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
2110  			rc = -EINVAL;
2111  		}
2112  
2113  		clear_bit(QED_MCP_IN_PROCESSING_BIT,
2114  			  &p_hwfn->mcp_info->mcp_handling_status);
2115  	}
2116  
2117  	/* ACK everything */
2118  	for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
2119  		__be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]);
2120  
2121  		/* The MFW expects the answer in BE, so we force the write in that format */
2122  		qed_wr(p_hwfn, p_ptt,
2123  		       info->mfw_mb_addr + sizeof(u32) +
2124  		       MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
2125  		       sizeof(u32) + i * sizeof(u32),
2126  		       (__force u32)val);
2127  	}
2128  
2129  	if (!found) {
2130  		DP_NOTICE(p_hwfn,
2131  			  "Received an MFW message indication but no new message!\n");
2132  		rc = -EINVAL;
2133  	}
2134  
2135  	/* Copy the new mfw messages into the shadow */
2136  	memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);
2137  
2138  	return rc;
2139  }
2140  
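/* For reference, the mailbox layout implied by the offsets used above and in
 * qed_mcp_read_mb(): a length word first, then the current-message dwords,
 * then a parallel array of acknowledgment dwords, all held in big-endian:
 *
 *	mfw_mb_addr + 0			- mailbox length
 *	mfw_mb_addr + 4 + i * 4		- message dword i (read)
 *	mfw_mb_addr + 4 + (n + i) * 4	- acknowledgment dword i (written)
 *
 * with n = MFW_DRV_MSG_MAX_DWORDS(mfw_mb_length). This is derived from the
 * code rather than from a firmware spec.
 */
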
2141  int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
2142  			struct qed_ptt *p_ptt,
2143  			u32 *p_mfw_ver, u32 *p_running_bundle_id)
2144  {
2145  	u32 global_offsize, public_base;
2146  
2147  	if (IS_VF(p_hwfn->cdev)) {
2148  		if (p_hwfn->vf_iov_info) {
2149  			struct pfvf_acquire_resp_tlv *p_resp;
2150  
2151  			p_resp = &p_hwfn->vf_iov_info->acquire_resp;
2152  			*p_mfw_ver = p_resp->pfdev_info.mfw_ver;
2153  			return 0;
2154  		} else {
2155  			DP_VERBOSE(p_hwfn,
2156  				   QED_MSG_IOV,
2157  				   "VF requested MFW version prior to ACQUIRE\n");
2158  			return -EINVAL;
2159  		}
2160  	}
2161  
2162  	public_base = p_hwfn->mcp_info->public_base;
2163  	global_offsize = qed_rd(p_hwfn, p_ptt,
2164  				SECTION_OFFSIZE_ADDR(public_base,
2165  						     PUBLIC_GLOBAL));
2166  	*p_mfw_ver =
2167  	    qed_rd(p_hwfn, p_ptt,
2168  		   SECTION_ADDR(global_offsize,
2169  				0) + offsetof(struct public_global, mfw_ver));
2170  
2171  	if (p_running_bundle_id) {
2172  		*p_running_bundle_id = qed_rd(p_hwfn, p_ptt,
2173  					      SECTION_ADDR(global_offsize, 0) +
2174  					      offsetof(struct public_global,
2175  						       running_bundle_id));
2176  	}
2177  
2178  	return 0;
2179  }
2180  
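/* Illustrative sketch (not driver code): the returned word is usually decoded
 * as four packed bytes for logging. The major.minor.rev.eng split below
 * mirrors how other qed code prints the MFW version; treat it as an example
 * only, with error handling omitted.
 */
#if 0
static void example_print_mfw_ver(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 mfw_ver = 0;

	if (!qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &mfw_ver, NULL))
		DP_INFO(p_hwfn, "MFW version %d.%d.%d.%d\n",
			(u8)(mfw_ver >> 24), (u8)(mfw_ver >> 16),
			(u8)(mfw_ver >> 8), (u8)mfw_ver);
}
#endif
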
2181  int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn,
2182  			struct qed_ptt *p_ptt, u32 *p_mbi_ver)
2183  {
2184  	u32 nvm_cfg_addr, nvm_cfg1_offset, mbi_ver_addr;
2185  
2186  	if (IS_VF(p_hwfn->cdev))
2187  		return -EINVAL;
2188  
2189  	/* Read the address of the nvm_cfg */
2190  	nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
2191  	if (!nvm_cfg_addr) {
2192  		DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
2193  		return -EINVAL;
2194  	}
2195  
2196  	/* Read the offset of nvm_cfg1 */
2197  	nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
2198  
2199  	mbi_ver_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
2200  		       offsetof(struct nvm_cfg1, glob) +
2201  		       offsetof(struct nvm_cfg1_glob, mbi_version);
2202  	*p_mbi_ver = qed_rd(p_hwfn, p_ptt,
2203  			    mbi_ver_addr) &
2204  		     (NVM_CFG1_GLOB_MBI_VERSION_0_MASK |
2205  		      NVM_CFG1_GLOB_MBI_VERSION_1_MASK |
2206  		      NVM_CFG1_GLOB_MBI_VERSION_2_MASK);
2207  
2208  	return 0;
2209  }
2210  
2211  int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn,
2212  			   struct qed_ptt *p_ptt, u32 *p_media_type)
2213  {
2214  	*p_media_type = MEDIA_UNSPECIFIED;
2215  
2216  	if (IS_VF(p_hwfn->cdev))
2217  		return -EINVAL;
2218  
2219  	if (!qed_mcp_is_init(p_hwfn)) {
2220  		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
2221  		return -EBUSY;
2222  	}
2223  
2224  	if (!p_ptt) {
2225  		*p_media_type = MEDIA_UNSPECIFIED;
2226  		return -EINVAL;
2227  	}
2228  
2229  	*p_media_type = qed_rd(p_hwfn, p_ptt,
2230  			       p_hwfn->mcp_info->port_addr +
2231  			       offsetof(struct public_port,
2232  					media_type));
2233  
2234  	return 0;
2235  }
2236  
2237  int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn,
2238  				 struct qed_ptt *p_ptt,
2239  				 u32 *p_transceiver_state,
2240  				 u32 *p_transceiver_type)
2241  {
2242  	u32 transceiver_info;
2243  
2244  	*p_transceiver_type = ETH_TRANSCEIVER_TYPE_NONE;
2245  	*p_transceiver_state = ETH_TRANSCEIVER_STATE_UPDATING;
2246  
2247  	if (IS_VF(p_hwfn->cdev))
2248  		return -EINVAL;
2249  
2250  	if (!qed_mcp_is_init(p_hwfn)) {
2251  		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
2252  		return -EBUSY;
2253  	}
2254  
2255  	transceiver_info = qed_rd(p_hwfn, p_ptt,
2256  				  p_hwfn->mcp_info->port_addr +
2257  				  offsetof(struct public_port,
2258  					   transceiver_data));
2259  
2260  	*p_transceiver_state = (transceiver_info &
2261  				ETH_TRANSCEIVER_STATE_MASK) >>
2262  				ETH_TRANSCEIVER_STATE_OFFSET;
2263  
2264  	if (*p_transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
2265  		*p_transceiver_type = (transceiver_info &
2266  				       ETH_TRANSCEIVER_TYPE_MASK) >>
2267  				       ETH_TRANSCEIVER_TYPE_OFFSET;
2268  	else
2269  		*p_transceiver_type = ETH_TRANSCEIVER_TYPE_UNKNOWN;
2270  
2271  	return 0;
2272  }
2273  
2274  static bool qed_is_transceiver_ready(u32 transceiver_state,
2275  				     u32 transceiver_type)
2276  {
2277  	if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) &&
2278  	    ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) == 0x0) &&
2279  	    (transceiver_type != ETH_TRANSCEIVER_TYPE_NONE))
2280  		return true;
2281  
2282  	return false;
2283  }
2284  
2285  int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn,
2286  			     struct qed_ptt *p_ptt, u32 *p_speed_mask)
2287  {
2288  	u32 transceiver_type, transceiver_state;
2289  	int ret;
2290  
2291  	ret = qed_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state,
2292  					   &transceiver_type);
2293  	if (ret)
2294  		return ret;
2295  
2296  	if (!qed_is_transceiver_ready(transceiver_state, transceiver_type))
2297  		return -EINVAL;
2299  
2300  	switch (transceiver_type) {
2301  	case ETH_TRANSCEIVER_TYPE_1G_LX:
2302  	case ETH_TRANSCEIVER_TYPE_1G_SX:
2303  	case ETH_TRANSCEIVER_TYPE_1G_PCC:
2304  	case ETH_TRANSCEIVER_TYPE_1G_ACC:
2305  	case ETH_TRANSCEIVER_TYPE_1000BASET:
2306  		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2307  		break;
2308  	case ETH_TRANSCEIVER_TYPE_10G_SR:
2309  	case ETH_TRANSCEIVER_TYPE_10G_LR:
2310  	case ETH_TRANSCEIVER_TYPE_10G_LRM:
2311  	case ETH_TRANSCEIVER_TYPE_10G_ER:
2312  	case ETH_TRANSCEIVER_TYPE_10G_PCC:
2313  	case ETH_TRANSCEIVER_TYPE_10G_ACC:
2314  	case ETH_TRANSCEIVER_TYPE_4x10G:
2315  		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2316  		break;
2317  	case ETH_TRANSCEIVER_TYPE_40G_LR4:
2318  	case ETH_TRANSCEIVER_TYPE_40G_SR4:
2319  	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
2320  	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
2321  		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2322  		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2323  		break;
2324  	case ETH_TRANSCEIVER_TYPE_100G_AOC:
2325  	case ETH_TRANSCEIVER_TYPE_100G_SR4:
2326  	case ETH_TRANSCEIVER_TYPE_100G_LR4:
2327  	case ETH_TRANSCEIVER_TYPE_100G_ER4:
2328  	case ETH_TRANSCEIVER_TYPE_100G_ACC:
2329  		*p_speed_mask =
2330  		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
2331  		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
2332  		break;
2333  	case ETH_TRANSCEIVER_TYPE_25G_SR:
2334  	case ETH_TRANSCEIVER_TYPE_25G_LR:
2335  	case ETH_TRANSCEIVER_TYPE_25G_AOC:
2336  	case ETH_TRANSCEIVER_TYPE_25G_ACC_S:
2337  	case ETH_TRANSCEIVER_TYPE_25G_ACC_M:
2338  	case ETH_TRANSCEIVER_TYPE_25G_ACC_L:
2339  		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
2340  		break;
2341  	case ETH_TRANSCEIVER_TYPE_25G_CA_N:
2342  	case ETH_TRANSCEIVER_TYPE_25G_CA_S:
2343  	case ETH_TRANSCEIVER_TYPE_25G_CA_L:
2344  	case ETH_TRANSCEIVER_TYPE_4x25G_CR:
2345  		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2346  		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2347  		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2348  		break;
2349  	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR:
2350  	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR:
2351  		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2352  				NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2353  		break;
2354  	case ETH_TRANSCEIVER_TYPE_40G_CR4:
2355  	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
2356  		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2357  		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2358  		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2359  		break;
2360  	case ETH_TRANSCEIVER_TYPE_100G_CR4:
2361  	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
2362  		*p_speed_mask =
2363  		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
2364  		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G |
2365  		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2366  		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2367  		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G |
2368  		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2369  		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2370  		break;
2371  	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
2372  	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
2373  	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC:
2374  		*p_speed_mask =
2375  		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
2376  		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2377  		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2378  		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2379  		break;
2380  	case ETH_TRANSCEIVER_TYPE_XLPPI:
2381  		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
2382  		break;
2383  	case ETH_TRANSCEIVER_TYPE_10G_BASET:
2384  	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR:
2385  	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR:
2386  		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2387  				NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2388  		break;
2389  	default:
2390  		DP_INFO(p_hwfn, "Unknown transceiver type 0x%x\n",
2391  			transceiver_type);
2392  		*p_speed_mask = 0xff;
2393  		break;
2394  	}
2395  
2396  	return 0;
2397  }
2398  
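/* Illustrative sketch (not driver code): the mask above is intended to trim
 * the advertised speeds down to what the plugged module can actually do. The
 * caller and variable names below are hypothetical; error handling is
 * omitted.
 */
#if 0
static void example_trim_adv_speeds(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt, u32 *adv_speeds)
{
	u32 module_speeds = 0;

	if (!qed_mcp_trans_speed_mask(p_hwfn, p_ptt, &module_speeds))
		*adv_speeds &= module_speeds;
}
#endif
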
2399  int qed_mcp_get_board_config(struct qed_hwfn *p_hwfn,
2400  			     struct qed_ptt *p_ptt, u32 *p_board_config)
2401  {
2402  	u32 nvm_cfg_addr, nvm_cfg1_offset, port_cfg_addr;
2403  
2404  	if (IS_VF(p_hwfn->cdev))
2405  		return -EINVAL;
2406  
2407  	if (!qed_mcp_is_init(p_hwfn)) {
2408  		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
2409  		return -EBUSY;
2410  	}
2411  	if (!p_ptt) {
2412  		*p_board_config = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;
2413  		return -EINVAL;
2414  	}
2415  
2416  	nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
2417  	nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
2418  	port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
2419  			offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
2420  	*p_board_config = qed_rd(p_hwfn, p_ptt,
2421  				 port_cfg_addr +
2422  				 offsetof(struct nvm_cfg1_port,
2423  					  board_cfg));
2424  
2425  	return 0;
2426  }
2427  
2428  /* Old MFW has a global configuration for all PFs regarding RDMA support */
2429  static void
2430  qed_mcp_get_shmem_proto_legacy(struct qed_hwfn *p_hwfn,
2431  			       enum qed_pci_personality *p_proto)
2432  {
2433  	/* There wasn't ever a legacy MFW that published iwarp.
2434  	 * So at this point, this is either plain l2 or RoCE.
2435  	 */
2436  	if (test_bit(QED_DEV_CAP_ROCE, &p_hwfn->hw_info.device_capabilities))
2437  		*p_proto = QED_PCI_ETH_ROCE;
2438  	else
2439  		*p_proto = QED_PCI_ETH;
2440  
2441  	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
2442  		   "According to Legacy capabilities, L2 personality is %08x\n",
2443  		   (u32)*p_proto);
2444  }
2445  
2446  static int
2447  qed_mcp_get_shmem_proto_mfw(struct qed_hwfn *p_hwfn,
2448  			    struct qed_ptt *p_ptt,
2449  			    enum qed_pci_personality *p_proto)
2450  {
2451  	u32 resp = 0, param = 0;
2452  	int rc;
2453  
2454  	rc = qed_mcp_cmd(p_hwfn, p_ptt,
2455  			 DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL, 0, &resp, &param);
2456  	if (rc)
2457  		return rc;
2458  	if (resp != FW_MSG_CODE_OK) {
2459  		DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
2460  			   "MFW lacks support for command; Returns %08x\n",
2461  			   resp);
2462  		return -EINVAL;
2463  	}
2464  
2465  	switch (param) {
2466  	case FW_MB_PARAM_GET_PF_RDMA_NONE:
2467  		*p_proto = QED_PCI_ETH;
2468  		break;
2469  	case FW_MB_PARAM_GET_PF_RDMA_ROCE:
2470  		*p_proto = QED_PCI_ETH_ROCE;
2471  		break;
2472  	case FW_MB_PARAM_GET_PF_RDMA_IWARP:
2473  		*p_proto = QED_PCI_ETH_IWARP;
2474  		break;
2475  	case FW_MB_PARAM_GET_PF_RDMA_BOTH:
2476  		*p_proto = QED_PCI_ETH_RDMA;
2477  		break;
2478  	default:
2479  		DP_NOTICE(p_hwfn,
2480  			  "MFW answers GET_PF_RDMA_PROTOCOL but param is %08x\n",
2481  			  param);
2482  		return -EINVAL;
2483  	}
2484  
2485  	DP_VERBOSE(p_hwfn,
2486  		   NETIF_MSG_IFUP,
2487  		   "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
2488  		   (u32)*p_proto, resp, param);
2489  	return 0;
2490  }
2491  
2492  static int
2493  qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
2494  			struct public_func *p_info,
2495  			struct qed_ptt *p_ptt,
2496  			enum qed_pci_personality *p_proto)
2497  {
2498  	int rc = 0;
2499  
2500  	switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
2501  	case FUNC_MF_CFG_PROTOCOL_ETHERNET:
2502  		if (!IS_ENABLED(CONFIG_QED_RDMA))
2503  			*p_proto = QED_PCI_ETH;
2504  		else if (qed_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto))
2505  			qed_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
2506  		break;
2507  	case FUNC_MF_CFG_PROTOCOL_ISCSI:
2508  		*p_proto = QED_PCI_ISCSI;
2509  		break;
2510  	case FUNC_MF_CFG_PROTOCOL_FCOE:
2511  		*p_proto = QED_PCI_FCOE;
2512  		break;
2513  	case FUNC_MF_CFG_PROTOCOL_ROCE:
2514  		DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n");
2515  		fallthrough;
2516  	default:
2517  		rc = -EINVAL;
2518  	}
2519  
2520  	return rc;
2521  }
2522  
2523  int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
2524  				 struct qed_ptt *p_ptt)
2525  {
2526  	struct qed_mcp_function_info *info;
2527  	struct public_func shmem_info;
2528  
2529  	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
2530  	info = &p_hwfn->mcp_info->func_info;
2531  
2532  	info->pause_on_host = (shmem_info.config &
2533  			       FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
2534  
2535  	if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
2536  				    &info->protocol)) {
2537  		DP_ERR(p_hwfn, "Unknown personality %08x\n",
2538  		       (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
2539  		return -EINVAL;
2540  	}
2541  
2542  	qed_read_pf_bandwidth(p_hwfn, &shmem_info);
2543  
2544  	if (shmem_info.mac_upper || shmem_info.mac_lower) {
2545  		info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
2546  		info->mac[1] = (u8)(shmem_info.mac_upper);
2547  		info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
2548  		info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
2549  		info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
2550  		info->mac[5] = (u8)(shmem_info.mac_lower);
2551  
2552  		/* Store primary MAC for later possible WoL */
2553  		memcpy(&p_hwfn->cdev->wol_mac, info->mac, ETH_ALEN);
2554  	} else {
2555  		DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n");
2556  	}
2557  
2558  	info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_lower |
2559  			 (((u64)shmem_info.fcoe_wwn_port_name_upper) << 32);
2560  	info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_lower |
2561  			 (((u64)shmem_info.fcoe_wwn_node_name_upper) << 32);
2562  
2563  	info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
2564  
2565  	info->mtu = (u16)shmem_info.mtu_size;
2566  
2567  	p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_NONE;
2568  	p_hwfn->cdev->wol_config = (u8)QED_OV_WOL_DEFAULT;
2569  	if (qed_mcp_is_init(p_hwfn)) {
2570  		u32 resp = 0, param = 0;
2571  		int rc;
2572  
2573  		rc = qed_mcp_cmd(p_hwfn, p_ptt,
2574  				 DRV_MSG_CODE_OS_WOL, 0, &resp, &param);
2575  		if (rc)
2576  			return rc;
2577  		if (resp == FW_MSG_CODE_OS_WOL_SUPPORTED)
2578  			p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_PME;
2579  	}
2580  
2581  	DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP),
2582  		   "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %pM wwn port %llx node %llx ovlan %04x wol %02x\n",
2583  		info->pause_on_host, info->protocol,
2584  		info->bandwidth_min, info->bandwidth_max,
2585  		info->mac,
2586  		info->wwn_port, info->wwn_node,
2587  		info->ovlan, (u8)p_hwfn->hw_info.b_wol_support);
2588  
2589  	return 0;
2590  }
2591  
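/* Worked example of the MAC reconstruction above: with
 * shmem_info.mac_upper == 0x0011 and shmem_info.mac_lower == 0x22334455,
 * the byte extraction yields info->mac == 00:11:22:33:44:55.
 */
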
2592  struct qed_mcp_link_params
2593  *qed_mcp_get_link_params(struct qed_hwfn *p_hwfn)
2594  {
2595  	if (!p_hwfn || !p_hwfn->mcp_info)
2596  		return NULL;
2597  	return &p_hwfn->mcp_info->link_input;
2598  }
2599  
2600  struct qed_mcp_link_state
2601  *qed_mcp_get_link_state(struct qed_hwfn *p_hwfn)
2602  {
2603  	if (!p_hwfn || !p_hwfn->mcp_info)
2604  		return NULL;
2605  	return &p_hwfn->mcp_info->link_output;
2606  }
2607  
2608  struct qed_mcp_link_capabilities
2609  *qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn)
2610  {
2611  	if (!p_hwfn || !p_hwfn->mcp_info)
2612  		return NULL;
2613  	return &p_hwfn->mcp_info->link_capabilities;
2614  }
2615  
2616  int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2617  {
2618  	u32 resp = 0, param = 0;
2619  	int rc;
2620  
2621  	rc = qed_mcp_cmd(p_hwfn, p_ptt,
2622  			 DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);
2623  
2624  	/* Wait for the drain to complete before returning */
2625  	msleep(1020);
2626  
2627  	return rc;
2628  }
2629  
2630  int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
2631  			   struct qed_ptt *p_ptt, u32 *p_flash_size)
2632  {
2633  	u32 flash_size;
2634  
2635  	if (IS_VF(p_hwfn->cdev))
2636  		return -EINVAL;
2637  
2638  	flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
2639  	flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
2640  		      MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
2641  	flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));
2642  
2643  	*p_flash_size = flash_size;
2644  
2645  	return 0;
2646  }
2647  
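/* Worked example of the size computation above: since 2^17 bytes is 1 Mbit,
 * the CFG4 field effectively encodes the flash size as a power-of-two number
 * of Mbit. A field value of 3, for instance, gives
 * 1 << (3 + MCP_BYTES_PER_MBIT_SHIFT) = 0x100000 bytes, i.e. 8 Mbit (1 MiB).
 */
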
2648  int qed_start_recovery_process(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2649  {
2650  	struct qed_dev *cdev = p_hwfn->cdev;
2651  
2652  	if (cdev->recov_in_prog) {
2653  		DP_NOTICE(p_hwfn,
2654  			  "Avoid triggering a recovery since such a process is already in progress\n");
2655  		return -EAGAIN;
2656  	}
2657  
2658  	DP_NOTICE(p_hwfn, "Triggering a recovery process\n");
2659  	qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
2660  
2661  	return 0;
2662  }
2663  
2664  #define QED_RECOVERY_PROLOG_SLEEP_MS    100
2665  
2666  int qed_recovery_prolog(struct qed_dev *cdev)
2667  {
2668  	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2669  	struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
2670  	int rc;
2671  
2672  	/* Allow ongoing PCIe transactions to complete */
2673  	msleep(QED_RECOVERY_PROLOG_SLEEP_MS);
2674  
2675  	/* Clear the PF's internal FID_enable in the PXP */
2676  	rc = qed_pglueb_set_pfid_enable(p_hwfn, p_ptt, false);
2677  	if (rc)
2678  		DP_NOTICE(p_hwfn,
2679  			  "qed_pglueb_set_pfid_enable() failed. rc = %d.\n",
2680  			  rc);
2681  
2682  	return rc;
2683  }
2684  
2685  static int
2686  qed_mcp_config_vf_msix_bb(struct qed_hwfn *p_hwfn,
2687  			  struct qed_ptt *p_ptt, u8 vf_id, u8 num)
2688  {
2689  	u32 resp = 0, param = 0, rc_param = 0;
2690  	int rc;
2691  
2692  	/* Only the leader can configure MSI-X; CMT needs to be accounted for */
2693  	if (!IS_LEAD_HWFN(p_hwfn))
2694  		return 0;
2695  	num *= p_hwfn->cdev->num_hwfns;
2696  
2697  	param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
2698  		 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
2699  	param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
2700  		 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
2701  
2702  	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
2703  			 &resp, &rc_param);
2704  
2705  	if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
2706  		DP_NOTICE(p_hwfn, "VF[%d]: MFW failed to set MSI-X\n", vf_id);
2707  		rc = -EINVAL;
2708  	} else {
2709  		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2710  			   "Requested 0x%02x MSI-X interrupts for VF 0x%02x\n",
2711  			   num, vf_id);
2712  	}
2713  
2714  	return rc;
2715  }
2716  
2717  static int
2718  qed_mcp_config_vf_msix_ah(struct qed_hwfn *p_hwfn,
2719  			  struct qed_ptt *p_ptt, u8 num)
2720  {
2721  	u32 resp = 0, param = num, rc_param = 0;
2722  	int rc;
2723  
2724  	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX,
2725  			 param, &resp, &rc_param);
2726  
2727  	if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) {
2728  		DP_NOTICE(p_hwfn, "MFW failed to set MSI-X for VFs\n");
2729  		rc = -EINVAL;
2730  	} else {
2731  		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2732  			   "Requested 0x%02x MSI-X interrupts for VFs\n", num);
2733  	}
2734  
2735  	return rc;
2736  }
2737  
2738  int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
2739  			   struct qed_ptt *p_ptt, u8 vf_id, u8 num)
2740  {
2741  	if (QED_IS_BB(p_hwfn->cdev))
2742  		return qed_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
2743  	else
2744  		return qed_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num);
2745  }
2746  
2747  int
2748  qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
2749  			 struct qed_ptt *p_ptt,
2750  			 struct qed_mcp_drv_version *p_ver)
2751  {
2752  	struct qed_mcp_mb_params mb_params;
2753  	struct drv_version_stc drv_version;
2754  	__be32 val;
2755  	u32 i;
2756  	int rc;
2757  
2758  	memset(&drv_version, 0, sizeof(drv_version));
2759  	drv_version.version = p_ver->version;
2760  	for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) {
2761  		val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)]));
2762  		*(__be32 *)&drv_version.name[i * sizeof(u32)] = val;
2763  	}
2764  
2765  	memset(&mb_params, 0, sizeof(mb_params));
2766  	mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
2767  	mb_params.p_data_src = &drv_version;
2768  	mb_params.data_src_size = sizeof(drv_version);
2769  	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2770  	if (rc)
2771  		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2772  
2773  	return rc;
2774  }
2775  
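/* Worked example of the name handling above: on a little-endian host the
 * bytes 'q', 'e', 'd', ' ' load as the dword 0x20646571, and cpu_to_be32()
 * stores them back as ' ', 'd', 'e', 'q'. Combined with the 32-bit swap the
 * MCP sees on SHMEM accesses (see the comment in qed_mcp_ov_update_mac()
 * below), the MFW ends up reading the characters in their original order.
 */
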
2776  /* Wait at most 100 msec for the MCP to halt */
2777  #define QED_MCP_HALT_SLEEP_MS		10
2778  #define QED_MCP_HALT_MAX_RETRIES	10
2779  
2780  int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2781  {
2782  	u32 resp = 0, param = 0, cpu_state, cnt = 0;
2783  	int rc;
2784  
2785  	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
2786  			 &param);
2787  	if (rc) {
2788  		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2789  		return rc;
2790  	}
2791  
2792  	do {
2793  		msleep(QED_MCP_HALT_SLEEP_MS);
2794  		cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
2795  		if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
2796  			break;
2797  	} while (++cnt < QED_MCP_HALT_MAX_RETRIES);
2798  
2799  	if (cnt == QED_MCP_HALT_MAX_RETRIES) {
2800  		DP_NOTICE(p_hwfn,
2801  			  "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
2802  			  qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
2803  		return -EBUSY;
2804  	}
2805  
2806  	qed_mcp_cmd_set_blocking(p_hwfn, true);
2807  
2808  	return 0;
2809  }
2810  
2811  #define QED_MCP_RESUME_SLEEP_MS	10
2812  
2813  int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2814  {
2815  	u32 cpu_mode, cpu_state;
2816  
2817  	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
2818  
2819  	cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
2820  	cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
2821  	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
2822  	msleep(QED_MCP_RESUME_SLEEP_MS);
2823  	cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
2824  
2825  	if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
2826  		DP_NOTICE(p_hwfn,
2827  			  "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
2828  			  cpu_mode, cpu_state);
2829  		return -EBUSY;
2830  	}
2831  
2832  	qed_mcp_cmd_set_blocking(p_hwfn, false);
2833  
2834  	return 0;
2835  }
2836  
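/* Illustrative sketch (not driver code): halt/resume are typically used as a
 * bracket around operations that must not race with MCP execution, e.g.
 * direct accesses to resources the MCP normally owns. The body below is a
 * placeholder and error handling is trimmed.
 */
#if 0
static int example_with_mcp_halted(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	int rc = qed_mcp_halt(p_hwfn, p_ptt);

	if (rc)
		return rc;

	/* ... access MCP-owned resources here ... */

	return qed_mcp_resume(p_hwfn, p_ptt);
}
#endif
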
2837  int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
2838  				     struct qed_ptt *p_ptt,
2839  				     enum qed_ov_client client)
2840  {
2841  	u32 resp = 0, param = 0;
2842  	u32 drv_mb_param;
2843  	int rc;
2844  
2845  	switch (client) {
2846  	case QED_OV_CLIENT_DRV:
2847  		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
2848  		break;
2849  	case QED_OV_CLIENT_USER:
2850  		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
2851  		break;
2852  	case QED_OV_CLIENT_VENDOR_SPEC:
2853  		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
2854  		break;
2855  	default:
2856  		DP_NOTICE(p_hwfn, "Invalid client type %d\n", client);
2857  		return -EINVAL;
2858  	}
2859  
2860  	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
2861  			 drv_mb_param, &resp, &param);
2862  	if (rc)
2863  		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2864  
2865  	return rc;
2866  }
2867  
2868  int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn,
2869  				   struct qed_ptt *p_ptt,
2870  				   enum qed_ov_driver_state drv_state)
2871  {
2872  	u32 resp = 0, param = 0;
2873  	u32 drv_mb_param;
2874  	int rc;
2875  
2876  	switch (drv_state) {
2877  	case QED_OV_DRIVER_STATE_NOT_LOADED:
2878  		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
2879  		break;
2880  	case QED_OV_DRIVER_STATE_DISABLED:
2881  		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
2882  		break;
2883  	case QED_OV_DRIVER_STATE_ACTIVE:
2884  		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
2885  		break;
2886  	default:
2887  		DP_NOTICE(p_hwfn, "Invalid driver state %d\n", drv_state);
2888  		return -EINVAL;
2889  	}
2890  
2891  	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
2892  			 drv_mb_param, &resp, &param);
2893  	if (rc)
2894  		DP_ERR(p_hwfn, "Failed to send driver state\n");
2895  
2896  	return rc;
2897  }
2898  
2899  int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn,
2900  			  struct qed_ptt *p_ptt, u16 mtu)
2901  {
2902  	u32 resp = 0, param = 0;
2903  	u32 drv_mb_param;
2904  	int rc;
2905  
2906  	drv_mb_param = (u32)mtu << DRV_MB_PARAM_OV_MTU_SIZE_SHIFT;
2907  	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU,
2908  			 drv_mb_param, &resp, &param);
2909  	if (rc)
2910  		DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc);
2911  
2912  	return rc;
2913  }
2914  
2915  int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
2916  			  struct qed_ptt *p_ptt, const u8 *mac)
2917  {
2918  	struct qed_mcp_mb_params mb_params;
2919  	u32 mfw_mac[2];
2920  	int rc;
2921  
2922  	memset(&mb_params, 0, sizeof(mb_params));
2923  	mb_params.cmd = DRV_MSG_CODE_SET_VMAC;
2924  	mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC <<
2925  			  DRV_MSG_CODE_VMAC_TYPE_SHIFT;
2926  	mb_params.param |= MCP_PF_ID(p_hwfn);
2927  
2928  	/* MCP is BE, and on LE platforms PCI would swap access to SHMEM
2929  	 * in 32-bit granularity.
2930  	 * So the MAC has to be set in native order [and not byte order],
2931  	 * otherwise it would be read incorrectly by MFW after swap.
2932  	 */
2933  	mfw_mac[0] = mac[0] << 24 | mac[1] << 16 | mac[2] << 8 | mac[3];
2934  	mfw_mac[1] = mac[4] << 24 | mac[5] << 16;
2935  
2936  	mb_params.p_data_src = (u8 *)mfw_mac;
2937  	mb_params.data_src_size = 8;
2938  	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2939  	if (rc)
2940  		DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);
2941  
2942  	/* Store primary MAC for later possible WoL */
2943  	memcpy(p_hwfn->cdev->wol_mac, mac, ETH_ALEN);
2944  
2945  	return rc;
2946  }
2947  
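/* Worked example of the packing above: for MAC 00:11:22:33:44:55 the shifts
 * produce mfw_mac[0] == 0x00112233 and mfw_mac[1] == 0x44550000, so after the
 * 32-bit SHMEM swap described above the MFW reads the bytes in wire order.
 */
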
2948  int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn,
2949  			  struct qed_ptt *p_ptt, enum qed_ov_wol wol)
2950  {
2951  	u32 resp = 0, param = 0;
2952  	u32 drv_mb_param;
2953  	int rc;
2954  
2955  	if (p_hwfn->hw_info.b_wol_support == QED_WOL_SUPPORT_NONE) {
2956  		DP_VERBOSE(p_hwfn, QED_MSG_SP,
2957  			   "Can't change WoL configuration when WoL isn't supported\n");
2958  		return -EINVAL;
2959  	}
2960  
2961  	switch (wol) {
2962  	case QED_OV_WOL_DEFAULT:
2963  		drv_mb_param = DRV_MB_PARAM_WOL_DEFAULT;
2964  		break;
2965  	case QED_OV_WOL_DISABLED:
2966  		drv_mb_param = DRV_MB_PARAM_WOL_DISABLED;
2967  		break;
2968  	case QED_OV_WOL_ENABLED:
2969  		drv_mb_param = DRV_MB_PARAM_WOL_ENABLED;
2970  		break;
2971  	default:
2972  		DP_ERR(p_hwfn, "Invalid wol state %d\n", wol);
2973  		return -EINVAL;
2974  	}
2975  
2976  	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_WOL,
2977  			 drv_mb_param, &resp, &param);
2978  	if (rc)
2979  		DP_ERR(p_hwfn, "Failed to send wol mode, rc = %d\n", rc);
2980  
2981  	/* Store the WoL update for a future unload */
2982  	p_hwfn->cdev->wol_config = (u8)wol;
2983  
2984  	return rc;
2985  }
2986  
2987  int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
2988  			      struct qed_ptt *p_ptt,
2989  			      enum qed_ov_eswitch eswitch)
2990  {
2991  	u32 resp = 0, param = 0;
2992  	u32 drv_mb_param;
2993  	int rc;
2994  
2995  	switch (eswitch) {
2996  	case QED_OV_ESWITCH_NONE:
2997  		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE;
2998  		break;
2999  	case QED_OV_ESWITCH_VEB:
3000  		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB;
3001  		break;
3002  	case QED_OV_ESWITCH_VEPA:
3003  		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA;
3004  		break;
3005  	default:
3006  		DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);
3007  		return -EINVAL;
3008  	}
3009  
3010  	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE,
3011  			 drv_mb_param, &resp, &param);
3012  	if (rc)
3013  		DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc);
3014  
3015  	return rc;
3016  }
3017  
3018  int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
3019  		    struct qed_ptt *p_ptt, enum qed_led_mode mode)
3020  {
3021  	u32 resp = 0, param = 0, drv_mb_param;
3022  	int rc;
3023  
3024  	switch (mode) {
3025  	case QED_LED_MODE_ON:
3026  		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
3027  		break;
3028  	case QED_LED_MODE_OFF:
3029  		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
3030  		break;
3031  	case QED_LED_MODE_RESTORE:
3032  		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
3033  		break;
3034  	default:
3035  		DP_NOTICE(p_hwfn, "Invalid LED mode %d\n", mode);
3036  		return -EINVAL;
3037  	}
3038  
3039  	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
3040  			 drv_mb_param, &resp, &param);
3041  
3042  	return rc;
3043  }
3044  
3045  int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
3046  			  struct qed_ptt *p_ptt, u32 mask_parities)
3047  {
3048  	u32 resp = 0, param = 0;
3049  	int rc;
3050  
3051  	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
3052  			 mask_parities, &resp, &param);
3053  
3054  	if (rc) {
3055  		DP_ERR(p_hwfn,
3056  		       "MCP response failure for mask parities, aborting\n");
3057  	} else if (resp != FW_MSG_CODE_OK) {
3058  		DP_ERR(p_hwfn,
3059  		       "MCP did not acknowledge mask parity request. Old MFW?\n");
3060  		rc = -EINVAL;
3061  	}
3062  
3063  	return rc;
3064  }
3065  
3066  int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
3067  {
3068  	u32 bytes_left = len, offset = 0, bytes_to_copy, read_len = 0;
3069  	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
3070  	u32 resp = 0, resp_param = 0;
3071  	struct qed_ptt *p_ptt;
3072  	int rc = 0;
3073  
3074  	p_ptt = qed_ptt_acquire(p_hwfn);
3075  	if (!p_ptt)
3076  		return -EBUSY;
3077  
3078  	while (bytes_left > 0) {
3079  		bytes_to_copy = min_t(u32, bytes_left, MCP_DRV_NVM_BUF_LEN);
3080  
3081  		rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3082  					DRV_MSG_CODE_NVM_READ_NVRAM,
3083  					addr + offset +
3084  					(bytes_to_copy <<
3085  					 DRV_MB_PARAM_NVM_LEN_OFFSET),
3086  					&resp, &resp_param,
3087  					&read_len,
3088  					(u32 *)(p_buf + offset), false);
3089  
3090  		if (rc || (resp != FW_MSG_CODE_NVM_OK)) {
3091  			DP_NOTICE(cdev, "MCP command rc = %d\n", rc);
3092  			break;
3093  		}
3094  
3095  		/* This can be a lengthy process, and the kernel may not be
3096  		 * preemptible. Sleep a bit to prevent CPU hogging.
3097  		 */
3098  		if (bytes_left % 0x1000 <
3099  		    (bytes_left - read_len) % 0x1000)
3100  			usleep_range(1000, 2000);
3101  
3102  		offset += read_len;
3103  		bytes_left -= read_len;
3104  	}
3105  
3106  	cdev->mcp_nvm_resp = resp;
3107  	qed_ptt_release(p_hwfn, p_ptt);
3108  
3109  	return rc;
3110  }
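
/* Illustrative usage sketch: reading a small NVM region into a local buffer.
 * The address here ("nvm_addr") is hypothetical; the helper acquires the PTT
 * window itself and splits the request into MCP_DRV_NVM_BUF_LEN chunks.
 *
 *	u8 buf[256];
 *
 *	rc = qed_mcp_nvm_read(cdev, nvm_addr, buf, sizeof(buf));
 *	if (rc)
 *		DP_NOTICE(cdev, "NVM read failed, rc = %d\n", rc);
 */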
3111  
3112  int qed_mcp_nvm_resp(struct qed_dev *cdev, u8 *p_buf)
3113  {
3114  	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
3115  	struct qed_ptt *p_ptt;
3116  
3117  	p_ptt = qed_ptt_acquire(p_hwfn);
3118  	if (!p_ptt)
3119  		return -EBUSY;
3120  
3121  	memcpy(p_buf, &cdev->mcp_nvm_resp, sizeof(cdev->mcp_nvm_resp));
3122  	qed_ptt_release(p_hwfn, p_ptt);
3123  
3124  	return 0;
3125  }
3126  
3127  int qed_mcp_nvm_write(struct qed_dev *cdev,
3128  		      u32 cmd, u32 addr, u8 *p_buf, u32 len)
3129  {
3130  	u32 buf_idx = 0, buf_size, nvm_cmd, nvm_offset, resp = 0, param;
3131  	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
3132  	struct qed_ptt *p_ptt;
3133  	int rc = -EINVAL;
3134  
3135  	p_ptt = qed_ptt_acquire(p_hwfn);
3136  	if (!p_ptt)
3137  		return -EBUSY;
3138  
3139  	switch (cmd) {
3140  	case QED_PUT_FILE_BEGIN:
3141  		nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN;
3142  		break;
3143  	case QED_PUT_FILE_DATA:
3144  		nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
3145  		break;
3146  	case QED_NVM_WRITE_NVRAM:
3147  		nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
3148  		break;
3149  	default:
3150  		DP_NOTICE(p_hwfn, "Invalid nvm write command 0x%x\n", cmd);
3151  		rc = -EINVAL;
3152  		goto out;
3153  	}
3154  
3155  	buf_size = min_t(u32, (len - buf_idx), MCP_DRV_NVM_BUF_LEN);
3156  	while (buf_idx < len) {
3157  		if (cmd == QED_PUT_FILE_BEGIN)
3158  			nvm_offset = addr;
3159  		else
3160  			nvm_offset = ((buf_size <<
3161  				       DRV_MB_PARAM_NVM_LEN_OFFSET) | addr) +
3162  				       buf_idx;
3163  		rc = qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset,
3164  					&resp, &param, buf_size,
3165  					(u32 *)&p_buf[buf_idx]);
3166  		if (rc) {
3167  			DP_NOTICE(cdev, "nvm write failed, rc = %d\n", rc);
3168  			resp = FW_MSG_CODE_ERROR;
3169  			break;
3170  		}
3171  
3172  		if (resp != FW_MSG_CODE_OK &&
3173  		    resp != FW_MSG_CODE_NVM_OK &&
3174  		    resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK) {
3175  			DP_NOTICE(cdev,
3176  				  "nvm write failed, resp = 0x%08x\n", resp);
3177  			rc = -EINVAL;
3178  			break;
3179  		}
3180  
3181  		/* This can be a lengthy process, and it's possible the kernel
3182  		 * isn't preemptible. Sleep a bit to prevent CPU hogging.
3183  		 */
3184  		if (buf_idx % 0x1000 > (buf_idx + buf_size) % 0x1000)
3185  			usleep_range(1000, 2000);
3186  
3187  		/* For MBI upgrade, MFW response includes the next buffer offset
3188  		 * to be delivered to MFW.
3189  		 */
3190  		if (param && cmd == QED_PUT_FILE_DATA) {
3191  			buf_idx =
3192  			QED_MFW_GET_FIELD(param,
3193  					  FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET);
3194  			buf_size =
3195  			QED_MFW_GET_FIELD(param,
3196  					  FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE);
3197  		} else {
3198  			buf_idx += buf_size;
3199  			buf_size = min_t(u32, (len - buf_idx),
3200  					 MCP_DRV_NVM_BUF_LEN);
3201  		}
3202  	}
3203  
3204  	cdev->mcp_nvm_resp = resp;
3205  out:
3206  	qed_ptt_release(p_hwfn, p_ptt);
3207  
3208  	return rc;
3209  }
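
/* Illustrative usage sketch: rewriting an NVRAM region in place.  The
 * address, data pointer and length are hypothetical; for QED_NVM_WRITE_NVRAM
 * the helper streams the buffer in MCP_DRV_NVM_BUF_LEN sized pieces, while
 * the put-file commands let the MFW steer the next offset/size as described
 * above.
 *
 *	rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM, nvm_addr,
 *			       data, data_len);
 */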
3210  
3211  int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
3212  			 u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf)
3213  {
3214  	u32 bytes_left, bytes_to_copy, buf_size, nvm_offset = 0;
3215  	u32 resp, param;
3216  	int rc;
3217  
3218  	nvm_offset |= (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) &
3219  		       DRV_MB_PARAM_TRANSCEIVER_PORT_MASK;
3220  	nvm_offset |= (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET) &
3221  		       DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK;
3222  
3223  	addr = offset;
3224  	offset = 0;
3225  	bytes_left = len;
3226  	while (bytes_left > 0) {
3227  		bytes_to_copy = min_t(u32, bytes_left,
3228  				      MAX_I2C_TRANSACTION_SIZE);
3229  		nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
3230  			       DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
3231  		nvm_offset |= ((addr + offset) <<
3232  			       DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET) &
3233  			       DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK;
3234  		nvm_offset |= (bytes_to_copy <<
3235  			       DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET) &
3236  			       DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK;
3237  		rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3238  					DRV_MSG_CODE_TRANSCEIVER_READ,
3239  					nvm_offset, &resp, &param, &buf_size,
3240  					(u32 *)(p_buf + offset), true);
3241  		if (rc) {
3242  			DP_NOTICE(p_hwfn,
3243  				  "Failed to send a transceiver read command to the MFW. rc = %d.\n",
3244  				  rc);
3245  			return rc;
3246  		}
3247  
3248  		if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
3249  			return -ENODEV;
3250  		else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
3251  			return -EINVAL;
3252  
3253  		offset += buf_size;
3254  		bytes_left -= buf_size;
3255  	}
3256  
3257  	return 0;
3258  }
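
/* Illustrative usage sketch: dumping the first 256 bytes of a transceiver
 * EEPROM.  The port index is hypothetical, and 0xa0 is assumed to be the
 * conventional A0h device address of an SFP module.
 *
 *	u8 eeprom[256];
 *
 *	rc = qed_mcp_phy_sfp_read(p_hwfn, p_ptt, port, 0xa0,
 *				  0, sizeof(eeprom), eeprom);
 */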
3259  
3260  int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3261  {
3262  	u32 drv_mb_param = 0, rsp, param;
3263  	int rc = 0;
3264  
3265  	drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
3266  			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
3267  
3268  	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3269  			 drv_mb_param, &rsp, &param);
3270  
3271  	if (rc)
3272  		return rc;
3273  
3274  	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3275  	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
3276  		rc = -EAGAIN;
3277  
3278  	return rc;
3279  }
3280  
3281  int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3282  {
3283  	u32 drv_mb_param, rsp, param;
3284  	int rc = 0;
3285  
3286  	drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
3287  			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
3288  
3289  	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3290  			 drv_mb_param, &rsp, &param);
3291  
3292  	if (rc)
3293  		return rc;
3294  
3295  	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3296  	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
3297  		rc = -EAGAIN;
3298  
3299  	return rc;
3300  }
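
/* Illustrative usage sketch: the two BIST helpers above are typically run
 * back to back from a selftest path; each returns -EAGAIN when the MFW
 * reports a failing result.
 *
 *	rc = qed_mcp_bist_register_test(p_hwfn, p_ptt);
 *	if (!rc)
 *		rc = qed_mcp_bist_clock_test(p_hwfn, p_ptt);
 */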
3301  
3302  int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn *p_hwfn,
3303  				    struct qed_ptt *p_ptt,
3304  				    u32 *num_images)
3305  {
3306  	u32 drv_mb_param = 0, rsp;
3307  	int rc = 0;
3308  
3309  	drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
3310  			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
3311  
3312  	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3313  			 drv_mb_param, &rsp, num_images);
3314  	if (rc)
3315  		return rc;
3316  
3317  	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK)
3318  		rc = -EINVAL;
3319  
3320  	return rc;
3321  }
3322  
3323  int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
3324  				   struct qed_ptt *p_ptt,
3325  				   struct bist_nvm_image_att *p_image_att,
3326  				   u32 image_index)
3327  {
3328  	u32 buf_size = 0, param, resp = 0, resp_param = 0;
3329  	int rc;
3330  
3331  	param = DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
3332  		DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT;
3333  	param |= image_index << DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT;
3334  
3335  	rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3336  				DRV_MSG_CODE_BIST_TEST, param,
3337  				&resp, &resp_param,
3338  				&buf_size,
3339  				(u32 *)p_image_att, false);
3340  	if (rc)
3341  		return rc;
3342  
3343  	if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3344  	    (p_image_att->return_code != 1))
3345  		rc = -EINVAL;
3346  
3347  	return rc;
3348  }
3349  
3350  int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn)
3351  {
3352  	struct qed_nvm_image_info nvm_info;
3353  	struct qed_ptt *p_ptt;
3354  	int rc;
3355  	u32 i;
3356  
3357  	if (p_hwfn->nvm_info.valid)
3358  		return 0;
3359  
3360  	p_ptt = qed_ptt_acquire(p_hwfn);
3361  	if (!p_ptt) {
3362  		DP_ERR(p_hwfn, "failed to acquire ptt\n");
3363  		return -EBUSY;
3364  	}
3365  
3366  	/* Acquire from MFW the amount of available images */
3367  	nvm_info.num_images = 0;
3368  	rc = qed_mcp_bist_nvm_get_num_images(p_hwfn,
3369  					     p_ptt, &nvm_info.num_images);
3370  	if (rc == -EOPNOTSUPP) {
3371  		DP_INFO(p_hwfn, "DRV_MSG_CODE_BIST_TEST is not supported\n");
3372  		goto out;
3373  	} else if (rc || !nvm_info.num_images) {
3374  		DP_ERR(p_hwfn, "Failed getting number of images\n");
3375  		goto err0;
3376  	}
3377  
3378  	nvm_info.image_att = kmalloc_array(nvm_info.num_images,
3379  					   sizeof(struct bist_nvm_image_att),
3380  					   GFP_KERNEL);
3381  	if (!nvm_info.image_att) {
3382  		rc = -ENOMEM;
3383  		goto err0;
3384  	}
3385  
3386  	/* Iterate over images and get their attributes */
3387  	for (i = 0; i < nvm_info.num_images; i++) {
3388  		rc = qed_mcp_bist_nvm_get_image_att(p_hwfn, p_ptt,
3389  						    &nvm_info.image_att[i], i);
3390  		if (rc) {
3391  			DP_ERR(p_hwfn,
3392  			       "Failed getting image index %d attributes\n", i);
3393  			goto err1;
3394  		}
3395  
3396  		DP_VERBOSE(p_hwfn, QED_MSG_SP, "image index %d, size %x\n", i,
3397  			   nvm_info.image_att[i].len);
3398  	}
3399  out:
3400  	/* Update hwfn's nvm_info */
3401  	if (nvm_info.num_images) {
3402  		p_hwfn->nvm_info.num_images = nvm_info.num_images;
3403  		kfree(p_hwfn->nvm_info.image_att);
3404  		p_hwfn->nvm_info.image_att = nvm_info.image_att;
3405  		p_hwfn->nvm_info.valid = true;
3406  	}
3407  
3408  	qed_ptt_release(p_hwfn, p_ptt);
3409  	return 0;
3410  
3411  err1:
3412  	kfree(nvm_info.image_att);
3413  err0:
3414  	qed_ptt_release(p_hwfn, p_ptt);
3415  	return rc;
3416  }
3417  
3418  void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn)
3419  {
3420  	kfree(p_hwfn->nvm_info.image_att);
3421  	p_hwfn->nvm_info.image_att = NULL;
3422  	p_hwfn->nvm_info.valid = false;
3423  }
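
/* Illustrative usage sketch: the populate/free pair caches the NVM image
 * directory on the hwfn.  Populating is a no-op once nvm_info.valid is set,
 * so callers may invoke it opportunistically before image lookups.
 *
 *	rc = qed_mcp_nvm_info_populate(p_hwfn);
 *	...
 *	qed_mcp_nvm_info_free(p_hwfn);
 */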
3424  
3425  int
3426  qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
3427  			  enum qed_nvm_images image_id,
3428  			  struct qed_nvm_image_att *p_image_att)
3429  {
3430  	enum nvm_image_type type;
3431  	int rc;
3432  	u32 i;
3433  
3434  	/* Translate image_id into MFW definitions */
3435  	switch (image_id) {
3436  	case QED_NVM_IMAGE_ISCSI_CFG:
3437  		type = NVM_TYPE_ISCSI_CFG;
3438  		break;
3439  	case QED_NVM_IMAGE_FCOE_CFG:
3440  		type = NVM_TYPE_FCOE_CFG;
3441  		break;
3442  	case QED_NVM_IMAGE_MDUMP:
3443  		type = NVM_TYPE_MDUMP;
3444  		break;
3445  	case QED_NVM_IMAGE_NVM_CFG1:
3446  		type = NVM_TYPE_NVM_CFG1;
3447  		break;
3448  	case QED_NVM_IMAGE_DEFAULT_CFG:
3449  		type = NVM_TYPE_DEFAULT_CFG;
3450  		break;
3451  	case QED_NVM_IMAGE_NVM_META:
3452  		type = NVM_TYPE_NVM_META;
3453  		break;
3454  	default:
3455  		DP_NOTICE(p_hwfn, "Unknown image_id %08x requested\n",
3456  			  image_id);
3457  		return -EINVAL;
3458  	}
3459  
3460  	rc = qed_mcp_nvm_info_populate(p_hwfn);
3461  	if (rc)
3462  		return rc;
3463  
3464  	for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
3465  		if (type == p_hwfn->nvm_info.image_att[i].image_type)
3466  			break;
3467  	if (i == p_hwfn->nvm_info.num_images) {
3468  		DP_VERBOSE(p_hwfn, QED_MSG_STORAGE,
3469  			   "Failed to find nvram image of type %08x\n",
3470  			   image_id);
3471  		return -ENOENT;
3472  	}
3473  
3474  	p_image_att->start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
3475  	p_image_att->length = p_hwfn->nvm_info.image_att[i].len;
3476  
3477  	return 0;
3478  }
3479  
3480  int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn,
3481  			  enum qed_nvm_images image_id,
3482  			  u8 *p_buffer, u32 buffer_len)
3483  {
3484  	struct qed_nvm_image_att image_att;
3485  	int rc;
3486  
3487  	memset(p_buffer, 0, buffer_len);
3488  
3489  	rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att);
3490  	if (rc)
3491  		return rc;
3492  
3493  	/* Validate sizes - both the image's and the supplied buffer's */
3494  	if (image_att.length <= 4) {
3495  		DP_VERBOSE(p_hwfn, QED_MSG_STORAGE,
3496  			   "Image [%d] is too small - only %d bytes\n",
3497  			   image_id, image_att.length);
3498  		return -EINVAL;
3499  	}
3500  
3501  	if (image_att.length > buffer_len) {
3502  		DP_VERBOSE(p_hwfn,
3503  			   QED_MSG_STORAGE,
3504  			   "Image [%d] is too big - %08x bytes where only %08x are available\n",
3505  			   image_id, image_att.length, buffer_len);
3506  		return -ENOMEM;
3507  	}
3508  
3509  	return qed_mcp_nvm_read(p_hwfn->cdev, image_att.start_addr,
3510  				p_buffer, image_att.length);
3511  }
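
/* Illustrative usage sketch: pulling a whole NVM image into a caller buffer.
 * The buffer size is hypothetical; the helper zeroes the buffer, checks both
 * lengths and then delegates to qed_mcp_nvm_read().
 *
 *	u8 cfg[4096];
 *
 *	rc = qed_mcp_get_nvm_image(p_hwfn, QED_NVM_IMAGE_NVM_CFG1,
 *				   cfg, sizeof(cfg));
 */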
3512  
3513  static enum resource_id_enum qed_mcp_get_mfw_res_id(enum qed_resources res_id)
3514  {
3515  	enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
3516  
3517  	switch (res_id) {
3518  	case QED_SB:
3519  		mfw_res_id = RESOURCE_NUM_SB_E;
3520  		break;
3521  	case QED_L2_QUEUE:
3522  		mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
3523  		break;
3524  	case QED_VPORT:
3525  		mfw_res_id = RESOURCE_NUM_VPORT_E;
3526  		break;
3527  	case QED_RSS_ENG:
3528  		mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
3529  		break;
3530  	case QED_PQ:
3531  		mfw_res_id = RESOURCE_NUM_PQ_E;
3532  		break;
3533  	case QED_RL:
3534  		mfw_res_id = RESOURCE_NUM_RL_E;
3535  		break;
3536  	case QED_MAC:
3537  	case QED_VLAN:
3538  		/* Each VFC resource can accommodate both a MAC and a VLAN */
3539  		mfw_res_id = RESOURCE_VFC_FILTER_E;
3540  		break;
3541  	case QED_ILT:
3542  		mfw_res_id = RESOURCE_ILT_E;
3543  		break;
3544  	case QED_LL2_RAM_QUEUE:
3545  		mfw_res_id = RESOURCE_LL2_QUEUE_E;
3546  		break;
3547  	case QED_LL2_CTX_QUEUE:
3548  		mfw_res_id = RESOURCE_LL2_CQS_E;
3549  		break;
3550  	case QED_RDMA_CNQ_RAM:
3551  	case QED_CMDQS_CQS:
3552  		/* CNQ/CMDQS are the same resource */
3553  		mfw_res_id = RESOURCE_CQS_E;
3554  		break;
3555  	case QED_RDMA_STATS_QUEUE:
3556  		mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
3557  		break;
3558  	case QED_BDQ:
3559  		mfw_res_id = RESOURCE_BDQ_E;
3560  		break;
3561  	default:
3562  		break;
3563  	}
3564  
3565  	return mfw_res_id;
3566  }
3567  
3568  #define QED_RESC_ALLOC_VERSION_MAJOR    2
3569  #define QED_RESC_ALLOC_VERSION_MINOR    0
3570  #define QED_RESC_ALLOC_VERSION				     \
3571  	((QED_RESC_ALLOC_VERSION_MAJOR <<		     \
3572  	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \
3573  	 (QED_RESC_ALLOC_VERSION_MINOR <<		     \
3574  	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))
3575  
3576  struct qed_resc_alloc_in_params {
3577  	u32 cmd;
3578  	enum qed_resources res_id;
3579  	u32 resc_max_val;
3580  };
3581  
3582  struct qed_resc_alloc_out_params {
3583  	u32 mcp_resp;
3584  	u32 mcp_param;
3585  	u32 resc_num;
3586  	u32 resc_start;
3587  	u32 vf_resc_num;
3588  	u32 vf_resc_start;
3589  	u32 flags;
3590  };
3591  
3592  static int
3593  qed_mcp_resc_allocation_msg(struct qed_hwfn *p_hwfn,
3594  			    struct qed_ptt *p_ptt,
3595  			    struct qed_resc_alloc_in_params *p_in_params,
3596  			    struct qed_resc_alloc_out_params *p_out_params)
3597  {
3598  	struct qed_mcp_mb_params mb_params;
3599  	struct resource_info mfw_resc_info;
3600  	int rc;
3601  
3602  	memset(&mfw_resc_info, 0, sizeof(mfw_resc_info));
3603  
3604  	mfw_resc_info.res_id = qed_mcp_get_mfw_res_id(p_in_params->res_id);
3605  	if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
3606  		DP_ERR(p_hwfn,
3607  		       "Failed to match resource %d [%s] with the MFW resources\n",
3608  		       p_in_params->res_id,
3609  		       qed_hw_get_resc_name(p_in_params->res_id));
3610  		return -EINVAL;
3611  	}
3612  
3613  	switch (p_in_params->cmd) {
3614  	case DRV_MSG_SET_RESOURCE_VALUE_MSG:
3615  		mfw_resc_info.size = p_in_params->resc_max_val;
3616  		fallthrough;
3617  	case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
3618  		break;
3619  	default:
3620  		DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
3621  		       p_in_params->cmd);
3622  		return -EINVAL;
3623  	}
3624  
3625  	memset(&mb_params, 0, sizeof(mb_params));
3626  	mb_params.cmd = p_in_params->cmd;
3627  	mb_params.param = QED_RESC_ALLOC_VERSION;
3628  	mb_params.p_data_src = &mfw_resc_info;
3629  	mb_params.data_src_size = sizeof(mfw_resc_info);
3630  	mb_params.p_data_dst = mb_params.p_data_src;
3631  	mb_params.data_dst_size = mb_params.data_src_size;
3632  
3633  	DP_VERBOSE(p_hwfn,
3634  		   QED_MSG_SP,
3635  		   "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
3636  		   p_in_params->cmd,
3637  		   p_in_params->res_id,
3638  		   qed_hw_get_resc_name(p_in_params->res_id),
3639  		   QED_MFW_GET_FIELD(mb_params.param,
3640  				     DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
3641  		   QED_MFW_GET_FIELD(mb_params.param,
3642  				     DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
3643  		   p_in_params->resc_max_val);
3644  
3645  	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3646  	if (rc)
3647  		return rc;
3648  
3649  	p_out_params->mcp_resp = mb_params.mcp_resp;
3650  	p_out_params->mcp_param = mb_params.mcp_param;
3651  	p_out_params->resc_num = mfw_resc_info.size;
3652  	p_out_params->resc_start = mfw_resc_info.offset;
3653  	p_out_params->vf_resc_num = mfw_resc_info.vf_size;
3654  	p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
3655  	p_out_params->flags = mfw_resc_info.flags;
3656  
3657  	DP_VERBOSE(p_hwfn,
3658  		   QED_MSG_SP,
3659  		   "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
3660  		   QED_MFW_GET_FIELD(p_out_params->mcp_param,
3661  				     FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
3662  		   QED_MFW_GET_FIELD(p_out_params->mcp_param,
3663  				     FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
3664  		   p_out_params->resc_num,
3665  		   p_out_params->resc_start,
3666  		   p_out_params->vf_resc_num,
3667  		   p_out_params->vf_resc_start, p_out_params->flags);
3668  
3669  	return 0;
3670  }
3671  
3672  int
3673  qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
3674  			 struct qed_ptt *p_ptt,
3675  			 enum qed_resources res_id,
3676  			 u32 resc_max_val, u32 *p_mcp_resp)
3677  {
3678  	struct qed_resc_alloc_out_params out_params;
3679  	struct qed_resc_alloc_in_params in_params;
3680  	int rc;
3681  
3682  	memset(&in_params, 0, sizeof(in_params));
3683  	in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
3684  	in_params.res_id = res_id;
3685  	in_params.resc_max_val = resc_max_val;
3686  	memset(&out_params, 0, sizeof(out_params));
3687  	rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3688  					 &out_params);
3689  	if (rc)
3690  		return rc;
3691  
3692  	*p_mcp_resp = out_params.mcp_resp;
3693  
3694  	return 0;
3695  }
3696  
3697  int
3698  qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
3699  		      struct qed_ptt *p_ptt,
3700  		      enum qed_resources res_id,
3701  		      u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start)
3702  {
3703  	struct qed_resc_alloc_out_params out_params;
3704  	struct qed_resc_alloc_in_params in_params;
3705  	int rc;
3706  
3707  	memset(&in_params, 0, sizeof(in_params));
3708  	in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
3709  	in_params.res_id = res_id;
3710  	memset(&out_params, 0, sizeof(out_params));
3711  	rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3712  					 &out_params);
3713  	if (rc)
3714  		return rc;
3715  
3716  	*p_mcp_resp = out_params.mcp_resp;
3717  
3718  	if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
3719  		*p_resc_num = out_params.resc_num;
3720  		*p_resc_start = out_params.resc_start;
3721  	}
3722  
3723  	return 0;
3724  }
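
/* Illustrative usage sketch: querying how many VPORTs the MFW assigned to
 * this function and where the range starts.  The outputs are only valid when
 * the MFW answers FW_MSG_CODE_RESOURCE_ALLOC_OK.
 *
 *	u32 resp = 0, num = 0, start = 0;
 *
 *	rc = qed_mcp_get_resc_info(p_hwfn, p_ptt, QED_VPORT,
 *				   &resp, &num, &start);
 */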
3725  
3726  int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3727  {
3728  	u32 mcp_resp, mcp_param;
3729  
3730  	return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
3731  			   &mcp_resp, &mcp_param);
3732  }
3733  
3734  static int qed_mcp_resource_cmd(struct qed_hwfn *p_hwfn,
3735  				struct qed_ptt *p_ptt,
3736  				u32 param, u32 *p_mcp_resp, u32 *p_mcp_param)
3737  {
3738  	int rc;
3739  
3740  	rc = qed_mcp_cmd_nosleep(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD,
3741  				 param, p_mcp_resp, p_mcp_param);
3742  	if (rc)
3743  		return rc;
3744  
3745  	if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3746  		DP_INFO(p_hwfn,
3747  			"The resource command is unsupported by the MFW\n");
3748  		return -EINVAL;
3749  	}
3750  
3751  	if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
3752  		u8 opcode = QED_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
3753  
3754  		DP_NOTICE(p_hwfn,
3755  			  "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
3756  			  param, opcode);
3757  		return -EINVAL;
3758  	}
3759  
3760  	return rc;
3761  }
3762  
3763  static int
3764  __qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
3765  		    struct qed_ptt *p_ptt,
3766  		    struct qed_resc_lock_params *p_params)
3767  {
3768  	u32 param = 0, mcp_resp, mcp_param;
3769  	u8 opcode;
3770  	int rc;
3771  
3772  	switch (p_params->timeout) {
3773  	case QED_MCP_RESC_LOCK_TO_DEFAULT:
3774  		opcode = RESOURCE_OPCODE_REQ;
3775  		p_params->timeout = 0;
3776  		break;
3777  	case QED_MCP_RESC_LOCK_TO_NONE:
3778  		opcode = RESOURCE_OPCODE_REQ_WO_AGING;
3779  		p_params->timeout = 0;
3780  		break;
3781  	default:
3782  		opcode = RESOURCE_OPCODE_REQ_W_AGING;
3783  		break;
3784  	}
3785  
3786  	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3787  	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3788  	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
3789  
3790  	DP_VERBOSE(p_hwfn,
3791  		   QED_MSG_SP,
3792  		   "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
3793  		   param, p_params->timeout, opcode, p_params->resource);
3794  
3795  	/* Attempt to acquire the resource */
3796  	rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
3797  	if (rc)
3798  		return rc;
3799  
3800  	/* Analyze the response */
3801  	p_params->owner = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
3802  	opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3803  
3804  	DP_VERBOSE(p_hwfn,
3805  		   QED_MSG_SP,
3806  		   "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
3807  		   mcp_param, opcode, p_params->owner);
3808  
3809  	switch (opcode) {
3810  	case RESOURCE_OPCODE_GNT:
3811  		p_params->b_granted = true;
3812  		break;
3813  	case RESOURCE_OPCODE_BUSY:
3814  		p_params->b_granted = false;
3815  		break;
3816  	default:
3817  		DP_NOTICE(p_hwfn,
3818  			  "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
3819  			  mcp_param, opcode);
3820  		return -EINVAL;
3821  	}
3822  
3823  	return 0;
3824  }
3825  
3826  int
3827  qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
3828  		  struct qed_ptt *p_ptt, struct qed_resc_lock_params *p_params)
3829  {
3830  	u32 retry_cnt = 0;
3831  	int rc;
3832  
3833  	do {
3834  		/* No need for an interval before the first iteration */
3835  		if (retry_cnt) {
3836  			if (p_params->sleep_b4_retry) {
3837  				u16 retry_interval_in_ms =
3838  				    DIV_ROUND_UP(p_params->retry_interval,
3839  						 1000);
3840  
3841  				msleep(retry_interval_in_ms);
3842  			} else {
3843  				udelay(p_params->retry_interval);
3844  			}
3845  		}
3846  
3847  		rc = __qed_mcp_resc_lock(p_hwfn, p_ptt, p_params);
3848  		if (rc)
3849  			return rc;
3850  
3851  		if (p_params->b_granted)
3852  			break;
3853  	} while (retry_cnt++ < p_params->retry_num);
3854  
3855  	return 0;
3856  }
3857  
3858  int
3859  qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
3860  		    struct qed_ptt *p_ptt,
3861  		    struct qed_resc_unlock_params *p_params)
3862  {
3863  	u32 param = 0, mcp_resp, mcp_param;
3864  	u8 opcode;
3865  	int rc;
3866  
3867  	opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
3868  				   : RESOURCE_OPCODE_RELEASE;
3869  	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3870  	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3871  
3872  	DP_VERBOSE(p_hwfn, QED_MSG_SP,
3873  		   "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
3874  		   param, opcode, p_params->resource);
3875  
3876  	/* Attempt to release the resource */
3877  	rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
3878  	if (rc)
3879  		return rc;
3880  
3881  	/* Analyze the response */
3882  	opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3883  
3884  	DP_VERBOSE(p_hwfn, QED_MSG_SP,
3885  		   "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
3886  		   mcp_param, opcode);
3887  
3888  	switch (opcode) {
3889  	case RESOURCE_OPCODE_RELEASED_PREVIOUS:
3890  		DP_INFO(p_hwfn,
3891  			"Resource unlock request for an already released resource [%d]\n",
3892  			p_params->resource);
3893  		fallthrough;
3894  	case RESOURCE_OPCODE_RELEASED:
3895  		p_params->b_released = true;
3896  		break;
3897  	case RESOURCE_OPCODE_WRONG_OWNER:
3898  		p_params->b_released = false;
3899  		break;
3900  	default:
3901  		DP_NOTICE(p_hwfn,
3902  			  "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
3903  			  mcp_param, opcode);
3904  		return -EINVAL;
3905  	}
3906  
3907  	return 0;
3908  }
3909  
3910  void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
3911  				    struct qed_resc_unlock_params *p_unlock,
3912  				    enum qed_resc_lock
3913  				    resource, bool b_is_permanent)
3914  {
3915  	if (p_lock) {
3916  		memset(p_lock, 0, sizeof(*p_lock));
3917  
3918  		/* Permanent resources don't require aging, and there's no
3919  		 * point in trying to acquire them more than once, since no
3920  		 * other entity is expected to release them.
3921  		 */
3922  		if (b_is_permanent) {
3923  			p_lock->timeout = QED_MCP_RESC_LOCK_TO_NONE;
3924  		} else {
3925  			p_lock->retry_num = QED_MCP_RESC_LOCK_RETRY_CNT_DFLT;
3926  			p_lock->retry_interval =
3927  			    QED_MCP_RESC_LOCK_RETRY_VAL_DFLT;
3928  			p_lock->sleep_b4_retry = true;
3929  		}
3930  
3931  		p_lock->resource = resource;
3932  	}
3933  
3934  	if (p_unlock) {
3935  		memset(p_unlock, 0, sizeof(*p_unlock));
3936  		p_unlock->resource = resource;
3937  	}
3938  }
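
/* Illustrative usage sketch of the lock/unlock pair: fill in default
 * parameters, take the MFW-arbitrated lock, do the protected work and
 * release it.  QED_RESC_LOCK_DBG_DUMP is assumed here as an example value of
 * enum qed_resc_lock; any other resource identifier works the same way.
 *
 *	struct qed_resc_lock_params lock;
 *	struct qed_resc_unlock_params unlock;
 *
 *	qed_mcp_resc_lock_default_init(&lock, &unlock,
 *				       QED_RESC_LOCK_DBG_DUMP, false);
 *	rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &lock);
 *	if (!rc && lock.b_granted) {
 *		(protected section)
 *		qed_mcp_resc_unlock(p_hwfn, p_ptt, &unlock);
 *	}
 */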
3939  
3940  bool qed_mcp_is_smart_an_supported(struct qed_hwfn *p_hwfn)
3941  {
3942  	return !!(p_hwfn->mcp_info->capabilities &
3943  		  FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ);
3944  }
3945  
3946  int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3947  {
3948  	u32 mcp_resp;
3949  	int rc;
3950  
3951  	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT,
3952  			 0, &mcp_resp, &p_hwfn->mcp_info->capabilities);
3953  	if (!rc)
3954  		DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_PROBE),
3955  			   "MFW supported features: %08x\n",
3956  			   p_hwfn->mcp_info->capabilities);
3957  
3958  	return rc;
3959  }
3960  
3961  int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3962  {
3963  	u32 mcp_resp, mcp_param, features;
3964  
3965  	features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE |
3966  		   DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK |
3967  		   DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL;
3968  
3969  	return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
3970  			   features, &mcp_resp, &mcp_param);
3971  }
3972  
3973  int qed_mcp_get_engine_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3974  {
3975  	struct qed_mcp_mb_params mb_params = {0};
3976  	struct qed_dev *cdev = p_hwfn->cdev;
3977  	u8 fir_valid, l2_valid;
3978  	int rc;
3979  
3980  	mb_params.cmd = DRV_MSG_CODE_GET_ENGINE_CONFIG;
3981  	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3982  	if (rc)
3983  		return rc;
3984  
3985  	if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3986  		DP_INFO(p_hwfn,
3987  			"The get_engine_config command is unsupported by the MFW\n");
3988  		return -EOPNOTSUPP;
3989  	}
3990  
3991  	fir_valid = QED_MFW_GET_FIELD(mb_params.mcp_param,
3992  				      FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID);
3993  	if (fir_valid)
3994  		cdev->fir_affin =
3995  		    QED_MFW_GET_FIELD(mb_params.mcp_param,
3996  				      FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE);
3997  
3998  	l2_valid = QED_MFW_GET_FIELD(mb_params.mcp_param,
3999  				     FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID);
4000  	if (l2_valid)
4001  		cdev->l2_affin_hint =
4002  		    QED_MFW_GET_FIELD(mb_params.mcp_param,
4003  				      FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE);
4004  
4005  	DP_INFO(p_hwfn,
4006  		"Engine affinity config: FIR={valid %hhd, value %hhd}, L2_hint={valid %hhd, value %hhd}\n",
4007  		fir_valid, cdev->fir_affin, l2_valid, cdev->l2_affin_hint);
4008  
4009  	return 0;
4010  }
4011  
4012  int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
4013  {
4014  	struct qed_mcp_mb_params mb_params = {0};
4015  	struct qed_dev *cdev = p_hwfn->cdev;
4016  	int rc;
4017  
4018  	mb_params.cmd = DRV_MSG_CODE_GET_PPFID_BITMAP;
4019  	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
4020  	if (rc)
4021  		return rc;
4022  
4023  	if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
4024  		DP_INFO(p_hwfn,
4025  			"The get_ppfid_bitmap command is unsupported by the MFW\n");
4026  		return -EOPNOTSUPP;
4027  	}
4028  
4029  	cdev->ppfid_bitmap = QED_MFW_GET_FIELD(mb_params.mcp_param,
4030  					       FW_MB_PARAM_PPFID_BITMAP);
4031  
4032  	DP_VERBOSE(p_hwfn, QED_MSG_SP, "PPFID bitmap 0x%hhx\n",
4033  		   cdev->ppfid_bitmap);
4034  
4035  	return 0;
4036  }
4037  
4038  int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
4039  			u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
4040  			u32 *p_len)
4041  {
4042  	u32 mb_param = 0, resp, param;
4043  	int rc;
4044  
4045  	QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ID, option_id);
4046  	if (flags & QED_NVM_CFG_OPTION_INIT)
4047  		QED_MFW_SET_FIELD(mb_param,
4048  				  DRV_MB_PARAM_NVM_CFG_OPTION_INIT, 1);
4049  	if (flags & QED_NVM_CFG_OPTION_FREE)
4050  		QED_MFW_SET_FIELD(mb_param,
4051  				  DRV_MB_PARAM_NVM_CFG_OPTION_FREE, 1);
4052  	if (flags & QED_NVM_CFG_OPTION_ENTITY_SEL) {
4053  		QED_MFW_SET_FIELD(mb_param,
4054  				  DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL, 1);
4055  		QED_MFW_SET_FIELD(mb_param,
4056  				  DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID,
4057  				  entity_id);
4058  	}
4059  
4060  	rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
4061  				DRV_MSG_CODE_GET_NVM_CFG_OPTION,
4062  				mb_param, &resp, &param, p_len,
4063  				(u32 *)p_buf, false);
4064  
4065  	return rc;
4066  }
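
/* Illustrative usage sketch: reading a persisted NVM config option for this
 * PF.  The option id is hypothetical; QED_NVM_CFG_OPTION_ENTITY_SEL limits
 * the query to the given entity id, and the MFW reports the attribute length
 * through *p_len.
 *
 *	u8 val[8];
 *	u32 len = sizeof(val);
 *
 *	rc = qed_mcp_nvm_get_cfg(p_hwfn, p_ptt, option_id, p_hwfn->abs_pf_id,
 *				 QED_NVM_CFG_OPTION_ENTITY_SEL, val, &len);
 */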
4067  
4068  int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
4069  			u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
4070  			u32 len)
4071  {
4072  	u32 mb_param = 0, resp, param;
4073  
4074  	QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ID, option_id);
4075  	if (flags & QED_NVM_CFG_OPTION_ALL)
4076  		QED_MFW_SET_FIELD(mb_param,
4077  				  DRV_MB_PARAM_NVM_CFG_OPTION_ALL, 1);
4078  	if (flags & QED_NVM_CFG_OPTION_INIT)
4079  		QED_MFW_SET_FIELD(mb_param,
4080  				  DRV_MB_PARAM_NVM_CFG_OPTION_INIT, 1);
4081  	if (flags & QED_NVM_CFG_OPTION_COMMIT)
4082  		QED_MFW_SET_FIELD(mb_param,
4083  				  DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT, 1);
4084  	if (flags & QED_NVM_CFG_OPTION_FREE)
4085  		QED_MFW_SET_FIELD(mb_param,
4086  				  DRV_MB_PARAM_NVM_CFG_OPTION_FREE, 1);
4087  	if (flags & QED_NVM_CFG_OPTION_ENTITY_SEL) {
4088  		QED_MFW_SET_FIELD(mb_param,
4089  				  DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL, 1);
4090  		QED_MFW_SET_FIELD(mb_param,
4091  				  DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID,
4092  				  entity_id);
4093  	}
4094  
4095  	return qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt,
4096  				  DRV_MSG_CODE_SET_NVM_CFG_OPTION,
4097  				  mb_param, &resp, &param, len, (u32 *)p_buf);
4098  }
4099  
4100  #define QED_MCP_DBG_DATA_MAX_SIZE               MCP_DRV_NVM_BUF_LEN
4101  #define QED_MCP_DBG_DATA_MAX_HEADER_SIZE        sizeof(u32)
4102  #define QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE \
4103  	(QED_MCP_DBG_DATA_MAX_SIZE - QED_MCP_DBG_DATA_MAX_HEADER_SIZE)
4104  
4105  static int
4106  __qed_mcp_send_debug_data(struct qed_hwfn *p_hwfn,
4107  			  struct qed_ptt *p_ptt, u8 *p_buf, u8 size)
4108  {
4109  	struct qed_mcp_mb_params mb_params;
4110  	int rc;
4111  
4112  	if (size > QED_MCP_DBG_DATA_MAX_SIZE) {
4113  		DP_ERR(p_hwfn,
4114  		       "Debug data size is %d while it should not exceed %d\n",
4115  		       size, QED_MCP_DBG_DATA_MAX_SIZE);
4116  		return -EINVAL;
4117  	}
4118  
4119  	memset(&mb_params, 0, sizeof(mb_params));
4120  	mb_params.cmd = DRV_MSG_CODE_DEBUG_DATA_SEND;
4121  	SET_MFW_FIELD(mb_params.param, DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE, size);
4122  	mb_params.p_data_src = p_buf;
4123  	mb_params.data_src_size = size;
4124  	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
4125  	if (rc)
4126  		return rc;
4127  
4128  	if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
4129  		DP_INFO(p_hwfn,
4130  			"The DEBUG_DATA_SEND command is unsupported by the MFW\n");
4131  		return -EOPNOTSUPP;
4132  	} else if (mb_params.mcp_resp == (u32)FW_MSG_CODE_DEBUG_NOT_ENABLED) {
4133  		DP_INFO(p_hwfn, "The DEBUG_DATA_SEND command is not enabled\n");
4134  		return -EBUSY;
4135  	} else if (mb_params.mcp_resp != (u32)FW_MSG_CODE_DEBUG_DATA_SEND_OK) {
4136  		DP_NOTICE(p_hwfn,
4137  			  "Failed to send debug data to the MFW [resp 0x%08x]\n",
4138  			  mb_params.mcp_resp);
4139  		return -EINVAL;
4140  	}
4141  
4142  	return 0;
4143  }
4144  
4145  enum qed_mcp_dbg_data_type {
4146  	QED_MCP_DBG_DATA_TYPE_RAW,
4147  };
4148  
4149  /* Header format: [31:28] PFID, [27:20] flags, [19:12] type, [11:0] S/N */
4150  #define QED_MCP_DBG_DATA_HDR_SN_OFFSET  0
4151  #define QED_MCP_DBG_DATA_HDR_SN_MASK            0x00000fff
4152  #define QED_MCP_DBG_DATA_HDR_TYPE_OFFSET        12
4153  #define QED_MCP_DBG_DATA_HDR_TYPE_MASK  0x000ff000
4154  #define QED_MCP_DBG_DATA_HDR_FLAGS_OFFSET       20
4155  #define QED_MCP_DBG_DATA_HDR_FLAGS_MASK 0x0ff00000
4156  #define QED_MCP_DBG_DATA_HDR_PF_OFFSET  28
4157  #define QED_MCP_DBG_DATA_HDR_PF_MASK            0xf0000000
4158  
4159  #define QED_MCP_DBG_DATA_HDR_FLAGS_FIRST        0x1
4160  #define QED_MCP_DBG_DATA_HDR_FLAGS_LAST 0x2
4161  
4162  static int
4163  qed_mcp_send_debug_data(struct qed_hwfn *p_hwfn,
4164  			struct qed_ptt *p_ptt,
4165  			enum qed_mcp_dbg_data_type type, u8 *p_buf, u32 size)
4166  {
4167  	u8 raw_data[QED_MCP_DBG_DATA_MAX_SIZE], *p_tmp_buf = p_buf;
4168  	u32 tmp_size = size, *p_header, *p_payload;
4169  	u8 flags = 0;
4170  	u16 seq;
4171  	int rc;
4172  
4173  	p_header = (u32 *)raw_data;
4174  	p_payload = (u32 *)(raw_data + QED_MCP_DBG_DATA_MAX_HEADER_SIZE);
4175  
4176  	seq = (u16)atomic_inc_return(&p_hwfn->mcp_info->dbg_data_seq);
4177  
4178  	/* First chunk is marked as 'first' */
4179  	flags |= QED_MCP_DBG_DATA_HDR_FLAGS_FIRST;
4180  
4181  	*p_header = 0;
4182  	SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_SN, seq);
4183  	SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_TYPE, type);
4184  	SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS, flags);
4185  	SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_PF, p_hwfn->abs_pf_id);
4186  
4187  	while (tmp_size > QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE) {
4188  		memcpy(p_payload, p_tmp_buf, QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE);
4189  		rc = __qed_mcp_send_debug_data(p_hwfn, p_ptt, raw_data,
4190  					       QED_MCP_DBG_DATA_MAX_SIZE);
4191  		if (rc)
4192  			return rc;
4193  
4194  		/* Clear the 'first' marking after sending the first chunk */
4195  		if (p_tmp_buf == p_buf) {
4196  			flags &= ~QED_MCP_DBG_DATA_HDR_FLAGS_FIRST;
4197  			SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS,
4198  				      flags);
4199  		}
4200  
4201  		p_tmp_buf += QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE;
4202  		tmp_size -= QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE;
4203  	}
4204  
4205  	/* Last chunk is marked as 'last' */
4206  	flags |= QED_MCP_DBG_DATA_HDR_FLAGS_LAST;
4207  	SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS, flags);
4208  	memcpy(p_payload, p_tmp_buf, tmp_size);
4209  
4210  	/* Casting the remaining size to u8 is ok since at this point it is <= 32 */
4211  	return __qed_mcp_send_debug_data(p_hwfn, p_ptt, raw_data,
4212  					 (u8)(QED_MCP_DBG_DATA_MAX_HEADER_SIZE +
4213  					 tmp_size));
4214  }
4215  
4216  int
4217  qed_mcp_send_raw_debug_data(struct qed_hwfn *p_hwfn,
4218  			    struct qed_ptt *p_ptt, u8 *p_buf, u32 size)
4219  {
4220  	return qed_mcp_send_debug_data(p_hwfn, p_ptt,
4221  				       QED_MCP_DBG_DATA_TYPE_RAW, p_buf, size);
4222  }
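
/* Illustrative usage sketch: pushing an arbitrary driver buffer into the MFW
 * debug log.  The buffer and its size are hypothetical; the helper chunks
 * the data and stamps every chunk with the sequence/flags header described
 * above.
 *
 *	rc = qed_mcp_send_raw_debug_data(p_hwfn, p_ptt, dbg_buf, dbg_size);
 */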
4223  
4224  bool qed_mcp_is_esl_supported(struct qed_hwfn *p_hwfn)
4225  {
4226  	return !!(p_hwfn->mcp_info->capabilities &
4227  		  FW_MB_PARAM_FEATURE_SUPPORT_ENHANCED_SYS_LCK);
4228  }
4229  
4230  int qed_mcp_get_esl_status(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool *active)
4231  {
4232  	u32 resp = 0, param = 0;
4233  	int rc;
4234  
4235  	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MANAGEMENT_STATUS, 0, &resp, &param);
4236  	if (rc) {
4237  		DP_NOTICE(p_hwfn, "Failed to send ESL command, rc = %d\n", rc);
4238  		return rc;
4239  	}
4240  
4241  	*active = !!(param & FW_MB_PARAM_MANAGEMENT_STATUS_LOCKDOWN_ENABLED);
4242  
4243  	return 0;
4244  }
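
/* Illustrative usage sketch: checking whether enhanced system lockdown (ESL)
 * is supported by the MFW and currently active before acting on it.
 *
 *	bool esl_active = false;
 *
 *	if (qed_mcp_is_esl_supported(p_hwfn))
 *		rc = qed_mcp_get_esl_status(p_hwfn, p_ptt, &esl_active);
 */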
4245