1  // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2  /* Copyright (c) 2015 - 2021 Intel Corporation */
3  #include <linux/etherdevice.h>
4  
5  #include "osdep.h"
6  #include "hmc.h"
7  #include "defs.h"
8  #include "type.h"
9  #include "ws.h"
10  #include "protos.h"
11  
12  /**
13   * irdma_get_qp_from_list - get next qp from a list
14   * @head: Listhead of qp's
15   * @qp: current qp
16   */
irdma_get_qp_from_list(struct list_head * head,struct irdma_sc_qp * qp)17  struct irdma_sc_qp *irdma_get_qp_from_list(struct list_head *head,
18  					   struct irdma_sc_qp *qp)
19  {
20  	struct list_head *lastentry;
21  	struct list_head *entry = NULL;
22  
23  	if (list_empty(head))
24  		return NULL;
25  
26  	if (!qp) {
27  		entry = head->next;
28  	} else {
29  		lastentry = &qp->list;
30  		entry = lastentry->next;
31  		if (entry == head)
32  			return NULL;
33  	}
34  
35  	return container_of(entry, struct irdma_sc_qp, list);
36  }
37  
38  /**
39   * irdma_sc_suspend_resume_qps - suspend/resume all qp's on VSI
40   * @vsi: the VSI struct pointer
41   * @op: Set to IRDMA_OP_RESUME or IRDMA_OP_SUSPEND
42   */
irdma_sc_suspend_resume_qps(struct irdma_sc_vsi * vsi,u8 op)43  void irdma_sc_suspend_resume_qps(struct irdma_sc_vsi *vsi, u8 op)
44  {
45  	struct irdma_sc_qp *qp = NULL;
46  	u8 i;
47  
48  	for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
49  		mutex_lock(&vsi->qos[i].qos_mutex);
50  		qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
51  		while (qp) {
52  			if (op == IRDMA_OP_RESUME) {
53  				if (!qp->dev->ws_add(vsi, i)) {
54  					qp->qs_handle =
55  						vsi->qos[qp->user_pri].qs_handle;
56  					irdma_cqp_qp_suspend_resume(qp, op);
57  				} else {
58  					irdma_cqp_qp_suspend_resume(qp, op);
59  					irdma_modify_qp_to_err(qp);
60  				}
61  			} else if (op == IRDMA_OP_SUSPEND) {
62  				/* issue cqp suspend command */
63  				if (!irdma_cqp_qp_suspend_resume(qp, op))
64  					atomic_inc(&vsi->qp_suspend_reqs);
65  			}
66  			qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
67  		}
68  		mutex_unlock(&vsi->qos[i].qos_mutex);
69  	}
70  }
71  
irdma_set_qos_info(struct irdma_sc_vsi * vsi,struct irdma_l2params * l2p)72  static void irdma_set_qos_info(struct irdma_sc_vsi  *vsi,
73  			       struct irdma_l2params *l2p)
74  {
75  	u8 i;
76  
77  	vsi->qos_rel_bw = l2p->vsi_rel_bw;
78  	vsi->qos_prio_type = l2p->vsi_prio_type;
79  	vsi->dscp_mode = l2p->dscp_mode;
80  	if (l2p->dscp_mode) {
81  		memcpy(vsi->dscp_map, l2p->dscp_map, sizeof(vsi->dscp_map));
82  		for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++)
83  			l2p->up2tc[i] = i;
84  	}
85  	for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
86  		if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
87  			vsi->qos[i].qs_handle = l2p->qs_handle_list[i];
88  		vsi->qos[i].traffic_class = l2p->up2tc[i];
89  		vsi->qos[i].rel_bw =
90  			l2p->tc_info[vsi->qos[i].traffic_class].rel_bw;
91  		vsi->qos[i].prio_type =
92  			l2p->tc_info[vsi->qos[i].traffic_class].prio_type;
93  		vsi->qos[i].valid = false;
94  	}
95  }
96  
97  /**
98   * irdma_change_l2params - given the new l2 parameters, change all qp
99   * @vsi: RDMA VSI pointer
100   * @l2params: New parameters from l2
101   */
irdma_change_l2params(struct irdma_sc_vsi * vsi,struct irdma_l2params * l2params)102  void irdma_change_l2params(struct irdma_sc_vsi *vsi,
103  			   struct irdma_l2params *l2params)
104  {
105  	if (l2params->mtu_changed) {
106  		vsi->mtu = l2params->mtu;
107  		if (vsi->ieq)
108  			irdma_reinitialize_ieq(vsi);
109  	}
110  
111  	if (!l2params->tc_changed)
112  		return;
113  
114  	vsi->tc_change_pending = false;
115  	irdma_set_qos_info(vsi, l2params);
116  	irdma_sc_suspend_resume_qps(vsi, IRDMA_OP_RESUME);
117  }
118  
119  /**
120   * irdma_qp_rem_qos - remove qp from qos lists during destroy qp
121   * @qp: qp to be removed from qos
122   */
irdma_qp_rem_qos(struct irdma_sc_qp * qp)123  void irdma_qp_rem_qos(struct irdma_sc_qp *qp)
124  {
125  	struct irdma_sc_vsi *vsi = qp->vsi;
126  
127  	ibdev_dbg(to_ibdev(qp->dev),
128  		  "DCB: DCB: Remove qp[%d] UP[%d] qset[%d] on_qoslist[%d]\n",
129  		  qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle,
130  		  qp->on_qoslist);
131  	mutex_lock(&vsi->qos[qp->user_pri].qos_mutex);
132  	if (qp->on_qoslist) {
133  		qp->on_qoslist = false;
134  		list_del(&qp->list);
135  	}
136  	mutex_unlock(&vsi->qos[qp->user_pri].qos_mutex);
137  }
138  
139  /**
140   * irdma_qp_add_qos - called during setctx for qp to be added to qos
141   * @qp: qp to be added to qos
142   */
irdma_qp_add_qos(struct irdma_sc_qp * qp)143  void irdma_qp_add_qos(struct irdma_sc_qp *qp)
144  {
145  	struct irdma_sc_vsi *vsi = qp->vsi;
146  
147  	ibdev_dbg(to_ibdev(qp->dev),
148  		  "DCB: DCB: Add qp[%d] UP[%d] qset[%d] on_qoslist[%d]\n",
149  		  qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle,
150  		  qp->on_qoslist);
151  	mutex_lock(&vsi->qos[qp->user_pri].qos_mutex);
152  	if (!qp->on_qoslist) {
153  		list_add(&qp->list, &vsi->qos[qp->user_pri].qplist);
154  		qp->on_qoslist = true;
155  		qp->qs_handle = vsi->qos[qp->user_pri].qs_handle;
156  	}
157  	mutex_unlock(&vsi->qos[qp->user_pri].qos_mutex);
158  }
159  
160  /**
161   * irdma_sc_pd_init - initialize sc pd struct
162   * @dev: sc device struct
163   * @pd: sc pd ptr
164   * @pd_id: pd_id for allocated pd
165   * @abi_ver: User/Kernel ABI version
166   */
irdma_sc_pd_init(struct irdma_sc_dev * dev,struct irdma_sc_pd * pd,u32 pd_id,int abi_ver)167  void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_id,
168  		      int abi_ver)
169  {
170  	pd->pd_id = pd_id;
171  	pd->abi_ver = abi_ver;
172  	pd->dev = dev;
173  }
174  
175  /**
176   * irdma_sc_add_arp_cache_entry - cqp wqe add arp cache entry
177   * @cqp: struct for cqp hw
178   * @info: arp entry information
179   * @scratch: u64 saved to be used during cqp completion
180   * @post_sq: flag for cqp db to ring
181   */
irdma_sc_add_arp_cache_entry(struct irdma_sc_cqp * cqp,struct irdma_add_arp_cache_entry_info * info,u64 scratch,bool post_sq)182  static int irdma_sc_add_arp_cache_entry(struct irdma_sc_cqp *cqp,
183  					struct irdma_add_arp_cache_entry_info *info,
184  					u64 scratch, bool post_sq)
185  {
186  	__le64 *wqe;
187  	u64 hdr;
188  
189  	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
190  	if (!wqe)
191  		return -ENOMEM;
192  	set_64bit_val(wqe, 8, info->reach_max);
193  	set_64bit_val(wqe, 16, ether_addr_to_u64(info->mac_addr));
194  
195  	hdr = info->arp_index |
196  	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_ARP) |
197  	      FIELD_PREP(IRDMA_CQPSQ_MAT_PERMANENT, (info->permanent ? 1 : 0)) |
198  	      FIELD_PREP(IRDMA_CQPSQ_MAT_ENTRYVALID, 1) |
199  	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
200  	dma_wmb(); /* make sure WQE is written before valid bit is set */
201  
202  	set_64bit_val(wqe, 24, hdr);
203  
204  	print_hex_dump_debug("WQE: ARP_CACHE_ENTRY WQE", DUMP_PREFIX_OFFSET,
205  			     16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
206  	if (post_sq)
207  		irdma_sc_cqp_post_sq(cqp);
208  
209  	return 0;
210  }
211  
212  /**
213   * irdma_sc_del_arp_cache_entry - dele arp cache entry
214   * @cqp: struct for cqp hw
215   * @scratch: u64 saved to be used during cqp completion
216   * @arp_index: arp index to delete arp entry
217   * @post_sq: flag for cqp db to ring
218   */
irdma_sc_del_arp_cache_entry(struct irdma_sc_cqp * cqp,u64 scratch,u16 arp_index,bool post_sq)219  static int irdma_sc_del_arp_cache_entry(struct irdma_sc_cqp *cqp, u64 scratch,
220  					u16 arp_index, bool post_sq)
221  {
222  	__le64 *wqe;
223  	u64 hdr;
224  
225  	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
226  	if (!wqe)
227  		return -ENOMEM;
228  
229  	hdr = arp_index |
230  	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_ARP) |
231  	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
232  	dma_wmb(); /* make sure WQE is written before valid bit is set */
233  
234  	set_64bit_val(wqe, 24, hdr);
235  
236  	print_hex_dump_debug("WQE: ARP_CACHE_DEL_ENTRY WQE",
237  			     DUMP_PREFIX_OFFSET, 16, 8, wqe,
238  			     IRDMA_CQP_WQE_SIZE * 8, false);
239  	if (post_sq)
240  		irdma_sc_cqp_post_sq(cqp);
241  
242  	return 0;
243  }
244  
245  /**
246   * irdma_sc_manage_apbvt_entry - for adding and deleting apbvt entries
247   * @cqp: struct for cqp hw
248   * @info: info for apbvt entry to add or delete
249   * @scratch: u64 saved to be used during cqp completion
250   * @post_sq: flag for cqp db to ring
251   */
irdma_sc_manage_apbvt_entry(struct irdma_sc_cqp * cqp,struct irdma_apbvt_info * info,u64 scratch,bool post_sq)252  static int irdma_sc_manage_apbvt_entry(struct irdma_sc_cqp *cqp,
253  				       struct irdma_apbvt_info *info,
254  				       u64 scratch, bool post_sq)
255  {
256  	__le64 *wqe;
257  	u64 hdr;
258  
259  	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
260  	if (!wqe)
261  		return -ENOMEM;
262  
263  	set_64bit_val(wqe, 16, info->port);
264  
265  	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_APBVT) |
266  	      FIELD_PREP(IRDMA_CQPSQ_MAPT_ADDPORT, info->add) |
267  	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
268  	dma_wmb(); /* make sure WQE is written before valid bit is set */
269  
270  	set_64bit_val(wqe, 24, hdr);
271  
272  	print_hex_dump_debug("WQE: MANAGE_APBVT WQE", DUMP_PREFIX_OFFSET, 16,
273  			     8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
274  	if (post_sq)
275  		irdma_sc_cqp_post_sq(cqp);
276  
277  	return 0;
278  }
279  
280  /**
281   * irdma_sc_manage_qhash_table_entry - manage quad hash entries
282   * @cqp: struct for cqp hw
283   * @info: info for quad hash to manage
284   * @scratch: u64 saved to be used during cqp completion
285   * @post_sq: flag for cqp db to ring
286   *
287   * This is called before connection establishment is started.
288   * For passive connections, when listener is created, it will
289   * call with entry type of  IRDMA_QHASH_TYPE_TCP_SYN with local
290   * ip address and tcp port. When SYN is received (passive
291   * connections) or sent (active connections), this routine is
292   * called with entry type of IRDMA_QHASH_TYPE_TCP_ESTABLISHED
293   * and quad is passed in info.
294   *
295   * When iwarp connection is done and its state moves to RTS, the
296   * quad hash entry in the hardware will point to iwarp's qp
297   * number and requires no calls from the driver.
298   */
299  static int
irdma_sc_manage_qhash_table_entry(struct irdma_sc_cqp * cqp,struct irdma_qhash_table_info * info,u64 scratch,bool post_sq)300  irdma_sc_manage_qhash_table_entry(struct irdma_sc_cqp *cqp,
301  				  struct irdma_qhash_table_info *info,
302  				  u64 scratch, bool post_sq)
303  {
304  	__le64 *wqe;
305  	u64 qw1 = 0;
306  	u64 qw2 = 0;
307  	u64 temp;
308  	struct irdma_sc_vsi *vsi = info->vsi;
309  
310  	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
311  	if (!wqe)
312  		return -ENOMEM;
313  
314  	set_64bit_val(wqe, 0, ether_addr_to_u64(info->mac_addr));
315  
316  	qw1 = FIELD_PREP(IRDMA_CQPSQ_QHASH_QPN, info->qp_num) |
317  	      FIELD_PREP(IRDMA_CQPSQ_QHASH_DEST_PORT, info->dest_port);
318  	if (info->ipv4_valid) {
319  		set_64bit_val(wqe, 48,
320  			      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->dest_ip[0]));
321  	} else {
322  		set_64bit_val(wqe, 56,
323  			      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR0, info->dest_ip[0]) |
324  			      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR1, info->dest_ip[1]));
325  
326  		set_64bit_val(wqe, 48,
327  			      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR2, info->dest_ip[2]) |
328  			      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->dest_ip[3]));
329  	}
330  	qw2 = FIELD_PREP(IRDMA_CQPSQ_QHASH_QS_HANDLE,
331  			 vsi->qos[info->user_pri].qs_handle);
332  	if (info->vlan_valid)
333  		qw2 |= FIELD_PREP(IRDMA_CQPSQ_QHASH_VLANID, info->vlan_id);
334  	set_64bit_val(wqe, 16, qw2);
335  	if (info->entry_type == IRDMA_QHASH_TYPE_TCP_ESTABLISHED) {
336  		qw1 |= FIELD_PREP(IRDMA_CQPSQ_QHASH_SRC_PORT, info->src_port);
337  		if (!info->ipv4_valid) {
338  			set_64bit_val(wqe, 40,
339  				      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR0, info->src_ip[0]) |
340  				      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR1, info->src_ip[1]));
341  			set_64bit_val(wqe, 32,
342  				      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR2, info->src_ip[2]) |
343  				      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->src_ip[3]));
344  		} else {
345  			set_64bit_val(wqe, 32,
346  				      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->src_ip[0]));
347  		}
348  	}
349  
350  	set_64bit_val(wqe, 8, qw1);
351  	temp = FIELD_PREP(IRDMA_CQPSQ_QHASH_WQEVALID, cqp->polarity) |
352  	       FIELD_PREP(IRDMA_CQPSQ_QHASH_OPCODE,
353  			  IRDMA_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY) |
354  	       FIELD_PREP(IRDMA_CQPSQ_QHASH_MANAGE, info->manage) |
355  	       FIELD_PREP(IRDMA_CQPSQ_QHASH_IPV4VALID, info->ipv4_valid) |
356  	       FIELD_PREP(IRDMA_CQPSQ_QHASH_VLANVALID, info->vlan_valid) |
357  	       FIELD_PREP(IRDMA_CQPSQ_QHASH_ENTRYTYPE, info->entry_type);
358  	dma_wmb(); /* make sure WQE is written before valid bit is set */
359  
360  	set_64bit_val(wqe, 24, temp);
361  
362  	print_hex_dump_debug("WQE: MANAGE_QHASH WQE", DUMP_PREFIX_OFFSET, 16,
363  			     8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
364  	if (post_sq)
365  		irdma_sc_cqp_post_sq(cqp);
366  
367  	return 0;
368  }
369  
370  /**
371   * irdma_sc_qp_init - initialize qp
372   * @qp: sc qp
373   * @info: initialization qp info
374   */
irdma_sc_qp_init(struct irdma_sc_qp * qp,struct irdma_qp_init_info * info)375  int irdma_sc_qp_init(struct irdma_sc_qp *qp, struct irdma_qp_init_info *info)
376  {
377  	int ret_code;
378  	u32 pble_obj_cnt;
379  	u16 wqe_size;
380  
381  	if (info->qp_uk_init_info.max_sq_frag_cnt >
382  	    info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags ||
383  	    info->qp_uk_init_info.max_rq_frag_cnt >
384  	    info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags)
385  		return -EINVAL;
386  
387  	qp->dev = info->pd->dev;
388  	qp->vsi = info->vsi;
389  	qp->ieq_qp = info->vsi->exception_lan_q;
390  	qp->sq_pa = info->sq_pa;
391  	qp->rq_pa = info->rq_pa;
392  	qp->hw_host_ctx_pa = info->host_ctx_pa;
393  	qp->q2_pa = info->q2_pa;
394  	qp->shadow_area_pa = info->shadow_area_pa;
395  	qp->q2_buf = info->q2;
396  	qp->pd = info->pd;
397  	qp->hw_host_ctx = info->host_ctx;
398  	info->qp_uk_init_info.wqe_alloc_db = qp->pd->dev->wqe_alloc_db;
399  	ret_code = irdma_uk_qp_init(&qp->qp_uk, &info->qp_uk_init_info);
400  	if (ret_code)
401  		return ret_code;
402  
403  	qp->virtual_map = info->virtual_map;
404  	pble_obj_cnt = info->pd->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
405  
406  	if ((info->virtual_map && info->sq_pa >= pble_obj_cnt) ||
407  	    (info->virtual_map && info->rq_pa >= pble_obj_cnt))
408  		return -EINVAL;
409  
410  	qp->llp_stream_handle = (void *)(-1);
411  	qp->hw_sq_size = irdma_get_encoded_wqe_size(qp->qp_uk.sq_ring.size,
412  						    IRDMA_QUEUE_TYPE_SQ_RQ);
413  	ibdev_dbg(to_ibdev(qp->dev),
414  		  "WQE: hw_sq_size[%04d] sq_ring.size[%04d]\n",
415  		  qp->hw_sq_size, qp->qp_uk.sq_ring.size);
416  	if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1 && qp->pd->abi_ver > 4)
417  		wqe_size = IRDMA_WQE_SIZE_128;
418  	else
419  		ret_code = irdma_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt,
420  						       &wqe_size);
421  	if (ret_code)
422  		return ret_code;
423  
424  	qp->hw_rq_size = irdma_get_encoded_wqe_size(qp->qp_uk.rq_size *
425  				(wqe_size / IRDMA_QP_WQE_MIN_SIZE), IRDMA_QUEUE_TYPE_SQ_RQ);
426  	ibdev_dbg(to_ibdev(qp->dev),
427  		  "WQE: hw_rq_size[%04d] qp_uk.rq_size[%04d] wqe_size[%04d]\n",
428  		  qp->hw_rq_size, qp->qp_uk.rq_size, wqe_size);
429  	qp->sq_tph_val = info->sq_tph_val;
430  	qp->rq_tph_val = info->rq_tph_val;
431  	qp->sq_tph_en = info->sq_tph_en;
432  	qp->rq_tph_en = info->rq_tph_en;
433  	qp->rcv_tph_en = info->rcv_tph_en;
434  	qp->xmit_tph_en = info->xmit_tph_en;
435  	qp->qp_uk.first_sq_wq = info->qp_uk_init_info.first_sq_wq;
436  	qp->qs_handle = qp->vsi->qos[qp->user_pri].qs_handle;
437  
438  	return 0;
439  }
440  
441  /**
442   * irdma_sc_qp_create - create qp
443   * @qp: sc qp
444   * @info: qp create info
445   * @scratch: u64 saved to be used during cqp completion
446   * @post_sq: flag for cqp db to ring
447   */
irdma_sc_qp_create(struct irdma_sc_qp * qp,struct irdma_create_qp_info * info,u64 scratch,bool post_sq)448  int irdma_sc_qp_create(struct irdma_sc_qp *qp, struct irdma_create_qp_info *info,
449  		       u64 scratch, bool post_sq)
450  {
451  	struct irdma_sc_cqp *cqp;
452  	__le64 *wqe;
453  	u64 hdr;
454  
455  	cqp = qp->dev->cqp;
456  	if (qp->qp_uk.qp_id < cqp->dev->hw_attrs.min_hw_qp_id ||
457  	    qp->qp_uk.qp_id >= cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt)
458  		return -EINVAL;
459  
460  	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
461  	if (!wqe)
462  		return -ENOMEM;
463  
464  	set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
465  	set_64bit_val(wqe, 40, qp->shadow_area_pa);
466  
467  	hdr = qp->qp_uk.qp_id |
468  	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_QP) |
469  	      FIELD_PREP(IRDMA_CQPSQ_QP_ORDVALID, (info->ord_valid ? 1 : 0)) |
470  	      FIELD_PREP(IRDMA_CQPSQ_QP_TOECTXVALID, info->tcp_ctx_valid) |
471  	      FIELD_PREP(IRDMA_CQPSQ_QP_MACVALID, info->mac_valid) |
472  	      FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) |
473  	      FIELD_PREP(IRDMA_CQPSQ_QP_VQ, qp->virtual_map) |
474  	      FIELD_PREP(IRDMA_CQPSQ_QP_FORCELOOPBACK, info->force_lpb) |
475  	      FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, info->cq_num_valid) |
476  	      FIELD_PREP(IRDMA_CQPSQ_QP_ARPTABIDXVALID,
477  			 info->arp_cache_idx_valid) |
478  	      FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, info->next_iwarp_state) |
479  	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
480  	dma_wmb(); /* make sure WQE is written before valid bit is set */
481  
482  	set_64bit_val(wqe, 24, hdr);
483  
484  	print_hex_dump_debug("WQE: QP_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8,
485  			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
486  	if (post_sq)
487  		irdma_sc_cqp_post_sq(cqp);
488  
489  	return 0;
490  }
491  
492  /**
493   * irdma_sc_qp_modify - modify qp cqp wqe
494   * @qp: sc qp
495   * @info: modify qp info
496   * @scratch: u64 saved to be used during cqp completion
497   * @post_sq: flag for cqp db to ring
498   */
irdma_sc_qp_modify(struct irdma_sc_qp * qp,struct irdma_modify_qp_info * info,u64 scratch,bool post_sq)499  int irdma_sc_qp_modify(struct irdma_sc_qp *qp, struct irdma_modify_qp_info *info,
500  		       u64 scratch, bool post_sq)
501  {
502  	__le64 *wqe;
503  	struct irdma_sc_cqp *cqp;
504  	u64 hdr;
505  	u8 term_actions = 0;
506  	u8 term_len = 0;
507  
508  	cqp = qp->dev->cqp;
509  	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
510  	if (!wqe)
511  		return -ENOMEM;
512  
513  	if (info->next_iwarp_state == IRDMA_QP_STATE_TERMINATE) {
514  		if (info->dont_send_fin)
515  			term_actions += IRDMAQP_TERM_SEND_TERM_ONLY;
516  		if (info->dont_send_term)
517  			term_actions += IRDMAQP_TERM_SEND_FIN_ONLY;
518  		if (term_actions == IRDMAQP_TERM_SEND_TERM_AND_FIN ||
519  		    term_actions == IRDMAQP_TERM_SEND_TERM_ONLY)
520  			term_len = info->termlen;
521  	}
522  
523  	set_64bit_val(wqe, 8,
524  		      FIELD_PREP(IRDMA_CQPSQ_QP_NEWMSS, info->new_mss) |
525  		      FIELD_PREP(IRDMA_CQPSQ_QP_TERMLEN, term_len));
526  	set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
527  	set_64bit_val(wqe, 40, qp->shadow_area_pa);
528  
529  	hdr = qp->qp_uk.qp_id |
530  	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MODIFY_QP) |
531  	      FIELD_PREP(IRDMA_CQPSQ_QP_ORDVALID, info->ord_valid) |
532  	      FIELD_PREP(IRDMA_CQPSQ_QP_TOECTXVALID, info->tcp_ctx_valid) |
533  	      FIELD_PREP(IRDMA_CQPSQ_QP_CACHEDVARVALID,
534  			 info->cached_var_valid) |
535  	      FIELD_PREP(IRDMA_CQPSQ_QP_VQ, qp->virtual_map) |
536  	      FIELD_PREP(IRDMA_CQPSQ_QP_FORCELOOPBACK, info->force_lpb) |
537  	      FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, info->cq_num_valid) |
538  	      FIELD_PREP(IRDMA_CQPSQ_QP_MACVALID, info->mac_valid) |
539  	      FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) |
540  	      FIELD_PREP(IRDMA_CQPSQ_QP_MSSCHANGE, info->mss_change) |
541  	      FIELD_PREP(IRDMA_CQPSQ_QP_REMOVEHASHENTRY,
542  			 info->remove_hash_idx) |
543  	      FIELD_PREP(IRDMA_CQPSQ_QP_TERMACT, term_actions) |
544  	      FIELD_PREP(IRDMA_CQPSQ_QP_RESETCON, info->reset_tcp_conn) |
545  	      FIELD_PREP(IRDMA_CQPSQ_QP_ARPTABIDXVALID,
546  			 info->arp_cache_idx_valid) |
547  	      FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, info->next_iwarp_state) |
548  	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
549  	dma_wmb(); /* make sure WQE is written before valid bit is set */
550  
551  	set_64bit_val(wqe, 24, hdr);
552  
553  	print_hex_dump_debug("WQE: QP_MODIFY WQE", DUMP_PREFIX_OFFSET, 16, 8,
554  			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
555  	if (post_sq)
556  		irdma_sc_cqp_post_sq(cqp);
557  
558  	return 0;
559  }
560  
561  /**
562   * irdma_sc_qp_destroy - cqp destroy qp
563   * @qp: sc qp
564   * @scratch: u64 saved to be used during cqp completion
565   * @remove_hash_idx: flag if to remove hash idx
566   * @ignore_mw_bnd: memory window bind flag
567   * @post_sq: flag for cqp db to ring
568   */
irdma_sc_qp_destroy(struct irdma_sc_qp * qp,u64 scratch,bool remove_hash_idx,bool ignore_mw_bnd,bool post_sq)569  int irdma_sc_qp_destroy(struct irdma_sc_qp *qp, u64 scratch,
570  			bool remove_hash_idx, bool ignore_mw_bnd, bool post_sq)
571  {
572  	__le64 *wqe;
573  	struct irdma_sc_cqp *cqp;
574  	u64 hdr;
575  
576  	cqp = qp->dev->cqp;
577  	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
578  	if (!wqe)
579  		return -ENOMEM;
580  
581  	set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
582  	set_64bit_val(wqe, 40, qp->shadow_area_pa);
583  
584  	hdr = qp->qp_uk.qp_id |
585  	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_QP) |
586  	      FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) |
587  	      FIELD_PREP(IRDMA_CQPSQ_QP_IGNOREMWBOUND, ignore_mw_bnd) |
588  	      FIELD_PREP(IRDMA_CQPSQ_QP_REMOVEHASHENTRY, remove_hash_idx) |
589  	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
590  	dma_wmb(); /* make sure WQE is written before valid bit is set */
591  
592  	set_64bit_val(wqe, 24, hdr);
593  
594  	print_hex_dump_debug("WQE: QP_DESTROY WQE", DUMP_PREFIX_OFFSET, 16, 8,
595  			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
596  	if (post_sq)
597  		irdma_sc_cqp_post_sq(cqp);
598  
599  	return 0;
600  }
601  
602  /**
603   * irdma_sc_get_encoded_ird_size -
604   * @ird_size: IRD size
605   * The ird from the connection is rounded to a supported HW setting and then encoded
606   * for ird_size field of qp_ctx. Consumers are expected to provide valid ird size based
607   * on hardware attributes. IRD size defaults to a value of 4 in case of invalid input
608   */
irdma_sc_get_encoded_ird_size(u16 ird_size)609  static u8 irdma_sc_get_encoded_ird_size(u16 ird_size)
610  {
611  	switch (ird_size ?
612  		roundup_pow_of_two(2 * ird_size) : 4) {
613  	case 256:
614  		return IRDMA_IRD_HW_SIZE_256;
615  	case 128:
616  		return IRDMA_IRD_HW_SIZE_128;
617  	case 64:
618  	case 32:
619  		return IRDMA_IRD_HW_SIZE_64;
620  	case 16:
621  	case 8:
622  		return IRDMA_IRD_HW_SIZE_16;
623  	case 4:
624  	default:
625  		break;
626  	}
627  
628  	return IRDMA_IRD_HW_SIZE_4;
629  }
630  
631  /**
632   * irdma_sc_qp_setctx_roce - set qp's context
633   * @qp: sc qp
634   * @qp_ctx: context ptr
635   * @info: ctx info
636   */
irdma_sc_qp_setctx_roce(struct irdma_sc_qp * qp,__le64 * qp_ctx,struct irdma_qp_host_ctx_info * info)637  void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx,
638  			     struct irdma_qp_host_ctx_info *info)
639  {
640  	struct irdma_roce_offload_info *roce_info;
641  	struct irdma_udp_offload_info *udp;
642  	u8 push_mode_en;
643  	u32 push_idx;
644  
645  	roce_info = info->roce_info;
646  	udp = info->udp_info;
647  	qp->user_pri = info->user_pri;
648  	if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX) {
649  		push_mode_en = 0;
650  		push_idx = 0;
651  	} else {
652  		push_mode_en = 1;
653  		push_idx = qp->push_idx;
654  	}
655  	set_64bit_val(qp_ctx, 0,
656  		      FIELD_PREP(IRDMAQPC_RQWQESIZE, qp->qp_uk.rq_wqe_size) |
657  		      FIELD_PREP(IRDMAQPC_RCVTPHEN, qp->rcv_tph_en) |
658  		      FIELD_PREP(IRDMAQPC_XMITTPHEN, qp->xmit_tph_en) |
659  		      FIELD_PREP(IRDMAQPC_RQTPHEN, qp->rq_tph_en) |
660  		      FIELD_PREP(IRDMAQPC_SQTPHEN, qp->sq_tph_en) |
661  		      FIELD_PREP(IRDMAQPC_PPIDX, push_idx) |
662  		      FIELD_PREP(IRDMAQPC_PMENA, push_mode_en) |
663  		      FIELD_PREP(IRDMAQPC_PDIDXHI, roce_info->pd_id >> 16) |
664  		      FIELD_PREP(IRDMAQPC_DC_TCP_EN, roce_info->dctcp_en) |
665  		      FIELD_PREP(IRDMAQPC_ERR_RQ_IDX_VALID, roce_info->err_rq_idx_valid) |
666  		      FIELD_PREP(IRDMAQPC_ISQP1, roce_info->is_qp1) |
667  		      FIELD_PREP(IRDMAQPC_ROCE_TVER, roce_info->roce_tver) |
668  		      FIELD_PREP(IRDMAQPC_IPV4, udp->ipv4) |
669  		      FIELD_PREP(IRDMAQPC_INSERTVLANTAG, udp->insert_vlan_tag));
670  	set_64bit_val(qp_ctx, 8, qp->sq_pa);
671  	set_64bit_val(qp_ctx, 16, qp->rq_pa);
672  	if ((roce_info->dcqcn_en || roce_info->dctcp_en) &&
673  	    !(udp->tos & 0x03))
674  		udp->tos |= ECN_CODE_PT_VAL;
675  	set_64bit_val(qp_ctx, 24,
676  		      FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) |
677  		      FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size) |
678  		      FIELD_PREP(IRDMAQPC_TTL, udp->ttl) | FIELD_PREP(IRDMAQPC_TOS, udp->tos) |
679  		      FIELD_PREP(IRDMAQPC_SRCPORTNUM, udp->src_port) |
680  		      FIELD_PREP(IRDMAQPC_DESTPORTNUM, udp->dst_port));
681  	set_64bit_val(qp_ctx, 32,
682  		      FIELD_PREP(IRDMAQPC_DESTIPADDR2, udp->dest_ip_addr[2]) |
683  		      FIELD_PREP(IRDMAQPC_DESTIPADDR3, udp->dest_ip_addr[3]));
684  	set_64bit_val(qp_ctx, 40,
685  		      FIELD_PREP(IRDMAQPC_DESTIPADDR0, udp->dest_ip_addr[0]) |
686  		      FIELD_PREP(IRDMAQPC_DESTIPADDR1, udp->dest_ip_addr[1]));
687  	set_64bit_val(qp_ctx, 48,
688  		      FIELD_PREP(IRDMAQPC_SNDMSS, udp->snd_mss) |
689  		      FIELD_PREP(IRDMAQPC_VLANTAG, udp->vlan_tag) |
690  		      FIELD_PREP(IRDMAQPC_ARPIDX, udp->arp_idx));
691  	set_64bit_val(qp_ctx, 56,
692  		      FIELD_PREP(IRDMAQPC_PKEY, roce_info->p_key) |
693  		      FIELD_PREP(IRDMAQPC_PDIDX, roce_info->pd_id) |
694  		      FIELD_PREP(IRDMAQPC_ACKCREDITS, roce_info->ack_credits) |
695  		      FIELD_PREP(IRDMAQPC_FLOWLABEL, udp->flow_label));
696  	set_64bit_val(qp_ctx, 64,
697  		      FIELD_PREP(IRDMAQPC_QKEY, roce_info->qkey) |
698  		      FIELD_PREP(IRDMAQPC_DESTQP, roce_info->dest_qp));
699  	set_64bit_val(qp_ctx, 80,
700  		      FIELD_PREP(IRDMAQPC_PSNNXT, udp->psn_nxt) |
701  		      FIELD_PREP(IRDMAQPC_LSN, udp->lsn));
702  	set_64bit_val(qp_ctx, 88,
703  		      FIELD_PREP(IRDMAQPC_EPSN, udp->epsn));
704  	set_64bit_val(qp_ctx, 96,
705  		      FIELD_PREP(IRDMAQPC_PSNMAX, udp->psn_max) |
706  		      FIELD_PREP(IRDMAQPC_PSNUNA, udp->psn_una));
707  	set_64bit_val(qp_ctx, 112,
708  		      FIELD_PREP(IRDMAQPC_CWNDROCE, udp->cwnd));
709  	set_64bit_val(qp_ctx, 128,
710  		      FIELD_PREP(IRDMAQPC_ERR_RQ_IDX, roce_info->err_rq_idx) |
711  		      FIELD_PREP(IRDMAQPC_RNRNAK_THRESH, udp->rnr_nak_thresh) |
712  		      FIELD_PREP(IRDMAQPC_REXMIT_THRESH, udp->rexmit_thresh) |
713  		      FIELD_PREP(IRDMAQPC_RTOMIN, roce_info->rtomin));
714  	set_64bit_val(qp_ctx, 136,
715  		      FIELD_PREP(IRDMAQPC_TXCQNUM, info->send_cq_num) |
716  		      FIELD_PREP(IRDMAQPC_RXCQNUM, info->rcv_cq_num));
717  	set_64bit_val(qp_ctx, 144,
718  		      FIELD_PREP(IRDMAQPC_STAT_INDEX, info->stats_idx));
719  	set_64bit_val(qp_ctx, 152, ether_addr_to_u64(roce_info->mac_addr) << 16);
720  	set_64bit_val(qp_ctx, 160,
721  		      FIELD_PREP(IRDMAQPC_ORDSIZE, roce_info->ord_size) |
722  		      FIELD_PREP(IRDMAQPC_IRDSIZE, irdma_sc_get_encoded_ird_size(roce_info->ird_size)) |
723  		      FIELD_PREP(IRDMAQPC_WRRDRSPOK, roce_info->wr_rdresp_en) |
724  		      FIELD_PREP(IRDMAQPC_RDOK, roce_info->rd_en) |
725  		      FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, info->stats_idx_valid) |
726  		      FIELD_PREP(IRDMAQPC_BINDEN, roce_info->bind_en) |
727  		      FIELD_PREP(IRDMAQPC_FASTREGEN, roce_info->fast_reg_en) |
728  		      FIELD_PREP(IRDMAQPC_DCQCNENABLE, roce_info->dcqcn_en) |
729  		      FIELD_PREP(IRDMAQPC_RCVNOICRC, roce_info->rcv_no_icrc) |
730  		      FIELD_PREP(IRDMAQPC_FW_CC_ENABLE, roce_info->fw_cc_enable) |
731  		      FIELD_PREP(IRDMAQPC_UDPRIVCQENABLE, roce_info->udprivcq_en) |
732  		      FIELD_PREP(IRDMAQPC_PRIVEN, roce_info->priv_mode_en) |
733  		      FIELD_PREP(IRDMAQPC_TIMELYENABLE, roce_info->timely_en));
734  	set_64bit_val(qp_ctx, 168,
735  		      FIELD_PREP(IRDMAQPC_QPCOMPCTX, info->qp_compl_ctx));
736  	set_64bit_val(qp_ctx, 176,
737  		      FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) |
738  		      FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) |
739  		      FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle));
740  	set_64bit_val(qp_ctx, 184,
741  		      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR3, udp->local_ipaddr[3]) |
742  		      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR2, udp->local_ipaddr[2]));
743  	set_64bit_val(qp_ctx, 192,
744  		      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR1, udp->local_ipaddr[1]) |
745  		      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR0, udp->local_ipaddr[0]));
746  	set_64bit_val(qp_ctx, 200,
747  		      FIELD_PREP(IRDMAQPC_THIGH, roce_info->t_high) |
748  		      FIELD_PREP(IRDMAQPC_TLOW, roce_info->t_low));
749  	set_64bit_val(qp_ctx, 208,
750  		      FIELD_PREP(IRDMAQPC_REMENDPOINTIDX, info->rem_endpoint_idx));
751  
752  	print_hex_dump_debug("WQE: QP_HOST CTX WQE", DUMP_PREFIX_OFFSET, 16,
753  			     8, qp_ctx, IRDMA_QP_CTX_SIZE, false);
754  }
755  
756  /* irdma_sc_alloc_local_mac_entry - allocate a mac entry
757   * @cqp: struct for cqp hw
758   * @scratch: u64 saved to be used during cqp completion
759   * @post_sq: flag for cqp db to ring
760   */
irdma_sc_alloc_local_mac_entry(struct irdma_sc_cqp * cqp,u64 scratch,bool post_sq)761  static int irdma_sc_alloc_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
762  					  bool post_sq)
763  {
764  	__le64 *wqe;
765  	u64 hdr;
766  
767  	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
768  	if (!wqe)
769  		return -ENOMEM;
770  
771  	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE,
772  			 IRDMA_CQP_OP_ALLOCATE_LOC_MAC_TABLE_ENTRY) |
773  	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
774  
775  	dma_wmb(); /* make sure WQE is written before valid bit is set */
776  
777  	set_64bit_val(wqe, 24, hdr);
778  
779  	print_hex_dump_debug("WQE: ALLOCATE_LOCAL_MAC WQE",
780  			     DUMP_PREFIX_OFFSET, 16, 8, wqe,
781  			     IRDMA_CQP_WQE_SIZE * 8, false);
782  
783  	if (post_sq)
784  		irdma_sc_cqp_post_sq(cqp);
785  	return 0;
786  }
787  
788  /**
789   * irdma_sc_add_local_mac_entry - add mac enry
790   * @cqp: struct for cqp hw
791   * @info:mac addr info
792   * @scratch: u64 saved to be used during cqp completion
793   * @post_sq: flag for cqp db to ring
794   */
irdma_sc_add_local_mac_entry(struct irdma_sc_cqp * cqp,struct irdma_local_mac_entry_info * info,u64 scratch,bool post_sq)795  static int irdma_sc_add_local_mac_entry(struct irdma_sc_cqp *cqp,
796  					struct irdma_local_mac_entry_info *info,
797  					u64 scratch, bool post_sq)
798  {
799  	__le64 *wqe;
800  	u64 header;
801  
802  	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
803  	if (!wqe)
804  		return -ENOMEM;
805  
806  	set_64bit_val(wqe, 32, ether_addr_to_u64(info->mac_addr));
807  
808  	header = FIELD_PREP(IRDMA_CQPSQ_MLM_TABLEIDX, info->entry_idx) |
809  		 FIELD_PREP(IRDMA_CQPSQ_OPCODE,
810  			    IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE) |
811  		 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
812  
813  	dma_wmb(); /* make sure WQE is written before valid bit is set */
814  
815  	set_64bit_val(wqe, 24, header);
816  
817  	print_hex_dump_debug("WQE: ADD_LOCAL_MAC WQE", DUMP_PREFIX_OFFSET, 16,
818  			     8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
819  
820  	if (post_sq)
821  		irdma_sc_cqp_post_sq(cqp);
822  	return 0;
823  }
824  
825  /**
826   * irdma_sc_del_local_mac_entry - cqp wqe to dele local mac
827   * @cqp: struct for cqp hw
828   * @scratch: u64 saved to be used during cqp completion
829   * @entry_idx: index of mac entry
830   * @ignore_ref_count: to force mac adde delete
831   * @post_sq: flag for cqp db to ring
832   */
irdma_sc_del_local_mac_entry(struct irdma_sc_cqp * cqp,u64 scratch,u16 entry_idx,u8 ignore_ref_count,bool post_sq)833  static int irdma_sc_del_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
834  					u16 entry_idx, u8 ignore_ref_count,
835  					bool post_sq)
836  {
837  	__le64 *wqe;
838  	u64 header;
839  
840  	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
841  	if (!wqe)
842  		return -ENOMEM;
843  	header = FIELD_PREP(IRDMA_CQPSQ_MLM_TABLEIDX, entry_idx) |
844  		 FIELD_PREP(IRDMA_CQPSQ_OPCODE,
845  			    IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE) |
846  		 FIELD_PREP(IRDMA_CQPSQ_MLM_FREEENTRY, 1) |
847  		 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity) |
848  		 FIELD_PREP(IRDMA_CQPSQ_MLM_IGNORE_REF_CNT, ignore_ref_count);
849  
850  	dma_wmb(); /* make sure WQE is written before valid bit is set */
851  
852  	set_64bit_val(wqe, 24, header);
853  
854  	print_hex_dump_debug("WQE: DEL_LOCAL_MAC_IPADDR WQE",
855  			     DUMP_PREFIX_OFFSET, 16, 8, wqe,
856  			     IRDMA_CQP_WQE_SIZE * 8, false);
857  
858  	if (post_sq)
859  		irdma_sc_cqp_post_sq(cqp);
860  	return 0;
861  }
862  
863  /**
864   * irdma_sc_qp_setctx - set qp's context
865   * @qp: sc qp
866   * @qp_ctx: context ptr
867   * @info: ctx info
868   */
irdma_sc_qp_setctx(struct irdma_sc_qp * qp,__le64 * qp_ctx,struct irdma_qp_host_ctx_info * info)869  void irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 *qp_ctx,
870  			struct irdma_qp_host_ctx_info *info)
871  {
872  	struct irdma_iwarp_offload_info *iw;
873  	struct irdma_tcp_offload_info *tcp;
874  	struct irdma_sc_dev *dev;
875  	u8 push_mode_en;
876  	u32 push_idx;
877  	u64 qw0, qw3, qw7 = 0, qw16 = 0;
878  	u64 mac = 0;
879  
880  	iw = info->iwarp_info;
881  	tcp = info->tcp_info;
882  	dev = qp->dev;
883  	if (iw->rcv_mark_en) {
884  		qp->pfpdu.marker_len = 4;
885  		qp->pfpdu.rcv_start_seq = tcp->rcv_nxt;
886  	}
887  	qp->user_pri = info->user_pri;
888  	if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX) {
889  		push_mode_en = 0;
890  		push_idx = 0;
891  	} else {
892  		push_mode_en = 1;
893  		push_idx = qp->push_idx;
894  	}
895  	qw0 = FIELD_PREP(IRDMAQPC_RQWQESIZE, qp->qp_uk.rq_wqe_size) |
896  	      FIELD_PREP(IRDMAQPC_RCVTPHEN, qp->rcv_tph_en) |
897  	      FIELD_PREP(IRDMAQPC_XMITTPHEN, qp->xmit_tph_en) |
898  	      FIELD_PREP(IRDMAQPC_RQTPHEN, qp->rq_tph_en) |
899  	      FIELD_PREP(IRDMAQPC_SQTPHEN, qp->sq_tph_en) |
900  	      FIELD_PREP(IRDMAQPC_PPIDX, push_idx) |
901  	      FIELD_PREP(IRDMAQPC_PMENA, push_mode_en);
902  
903  	set_64bit_val(qp_ctx, 8, qp->sq_pa);
904  	set_64bit_val(qp_ctx, 16, qp->rq_pa);
905  
906  	qw3 = FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) |
907  	      FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size);
908  	if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
909  		qw3 |= FIELD_PREP(IRDMAQPC_GEN1_SRCMACADDRIDX,
910  				  qp->src_mac_addr_idx);
911  	set_64bit_val(qp_ctx, 136,
912  		      FIELD_PREP(IRDMAQPC_TXCQNUM, info->send_cq_num) |
913  		      FIELD_PREP(IRDMAQPC_RXCQNUM, info->rcv_cq_num));
914  	set_64bit_val(qp_ctx, 168,
915  		      FIELD_PREP(IRDMAQPC_QPCOMPCTX, info->qp_compl_ctx));
916  	set_64bit_val(qp_ctx, 176,
917  		      FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) |
918  		      FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) |
919  		      FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle) |
920  		      FIELD_PREP(IRDMAQPC_EXCEPTION_LAN_QUEUE, qp->ieq_qp));
921  	if (info->iwarp_info_valid) {
922  		qw0 |= FIELD_PREP(IRDMAQPC_DDP_VER, iw->ddp_ver) |
923  		       FIELD_PREP(IRDMAQPC_RDMAP_VER, iw->rdmap_ver) |
924  		       FIELD_PREP(IRDMAQPC_DC_TCP_EN, iw->dctcp_en) |
925  		       FIELD_PREP(IRDMAQPC_ECN_EN, iw->ecn_en) |
926  		       FIELD_PREP(IRDMAQPC_IBRDENABLE, iw->ib_rd_en) |
927  		       FIELD_PREP(IRDMAQPC_PDIDXHI, iw->pd_id >> 16) |
928  		       FIELD_PREP(IRDMAQPC_ERR_RQ_IDX_VALID,
929  				  iw->err_rq_idx_valid);
930  		qw7 |= FIELD_PREP(IRDMAQPC_PDIDX, iw->pd_id);
931  		qw16 |= FIELD_PREP(IRDMAQPC_ERR_RQ_IDX, iw->err_rq_idx) |
932  			FIELD_PREP(IRDMAQPC_RTOMIN, iw->rtomin);
933  		set_64bit_val(qp_ctx, 144,
934  			      FIELD_PREP(IRDMAQPC_Q2ADDR, qp->q2_pa >> 8) |
935  			      FIELD_PREP(IRDMAQPC_STAT_INDEX, info->stats_idx));
936  
937  		if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
938  			mac = ether_addr_to_u64(iw->mac_addr);
939  
940  		set_64bit_val(qp_ctx, 152,
941  			      mac << 16 | FIELD_PREP(IRDMAQPC_LASTBYTESENT, iw->last_byte_sent));
942  		set_64bit_val(qp_ctx, 160,
943  			      FIELD_PREP(IRDMAQPC_ORDSIZE, iw->ord_size) |
944  			      FIELD_PREP(IRDMAQPC_IRDSIZE, irdma_sc_get_encoded_ird_size(iw->ird_size)) |
945  			      FIELD_PREP(IRDMAQPC_WRRDRSPOK, iw->wr_rdresp_en) |
946  			      FIELD_PREP(IRDMAQPC_RDOK, iw->rd_en) |
947  			      FIELD_PREP(IRDMAQPC_SNDMARKERS, iw->snd_mark_en) |
948  			      FIELD_PREP(IRDMAQPC_BINDEN, iw->bind_en) |
949  			      FIELD_PREP(IRDMAQPC_FASTREGEN, iw->fast_reg_en) |
950  			      FIELD_PREP(IRDMAQPC_PRIVEN, iw->priv_mode_en) |
951  			      FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, info->stats_idx_valid) |
952  			      FIELD_PREP(IRDMAQPC_IWARPMODE, 1) |
953  			      FIELD_PREP(IRDMAQPC_RCVMARKERS, iw->rcv_mark_en) |
954  			      FIELD_PREP(IRDMAQPC_ALIGNHDRS, iw->align_hdrs) |
955  			      FIELD_PREP(IRDMAQPC_RCVNOMPACRC, iw->rcv_no_mpa_crc) |
956  			      FIELD_PREP(IRDMAQPC_RCVMARKOFFSET, iw->rcv_mark_offset || !tcp ? iw->rcv_mark_offset : tcp->rcv_nxt) |
957  			      FIELD_PREP(IRDMAQPC_SNDMARKOFFSET, iw->snd_mark_offset || !tcp ? iw->snd_mark_offset : tcp->snd_nxt) |
958  			      FIELD_PREP(IRDMAQPC_TIMELYENABLE, iw->timely_en));
959  	}
960  	if (info->tcp_info_valid) {
961  		qw0 |= FIELD_PREP(IRDMAQPC_IPV4, tcp->ipv4) |
962  		       FIELD_PREP(IRDMAQPC_NONAGLE, tcp->no_nagle) |
963  		       FIELD_PREP(IRDMAQPC_INSERTVLANTAG,
964  				  tcp->insert_vlan_tag) |
965  		       FIELD_PREP(IRDMAQPC_TIMESTAMP, tcp->time_stamp) |
966  		       FIELD_PREP(IRDMAQPC_LIMIT, tcp->cwnd_inc_limit) |
967  		       FIELD_PREP(IRDMAQPC_DROPOOOSEG, tcp->drop_ooo_seg) |
968  		       FIELD_PREP(IRDMAQPC_DUPACK_THRESH, tcp->dup_ack_thresh);
969  
970  		if ((iw->ecn_en || iw->dctcp_en) && !(tcp->tos & 0x03))
971  			tcp->tos |= ECN_CODE_PT_VAL;
972  
973  		qw3 |= FIELD_PREP(IRDMAQPC_TTL, tcp->ttl) |
974  		       FIELD_PREP(IRDMAQPC_AVOIDSTRETCHACK, tcp->avoid_stretch_ack) |
975  		       FIELD_PREP(IRDMAQPC_TOS, tcp->tos) |
976  		       FIELD_PREP(IRDMAQPC_SRCPORTNUM, tcp->src_port) |
977  		       FIELD_PREP(IRDMAQPC_DESTPORTNUM, tcp->dst_port);
978  		if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) {
979  			qw3 |= FIELD_PREP(IRDMAQPC_GEN1_SRCMACADDRIDX, tcp->src_mac_addr_idx);
980  
981  			qp->src_mac_addr_idx = tcp->src_mac_addr_idx;
982  		}
983  		set_64bit_val(qp_ctx, 32,
984  			      FIELD_PREP(IRDMAQPC_DESTIPADDR2, tcp->dest_ip_addr[2]) |
985  			      FIELD_PREP(IRDMAQPC_DESTIPADDR3, tcp->dest_ip_addr[3]));
986  		set_64bit_val(qp_ctx, 40,
987  			      FIELD_PREP(IRDMAQPC_DESTIPADDR0, tcp->dest_ip_addr[0]) |
988  			      FIELD_PREP(IRDMAQPC_DESTIPADDR1, tcp->dest_ip_addr[1]));
989  		set_64bit_val(qp_ctx, 48,
990  			      FIELD_PREP(IRDMAQPC_SNDMSS, tcp->snd_mss) |
991  			      FIELD_PREP(IRDMAQPC_SYN_RST_HANDLING, tcp->syn_rst_handling) |
992  			      FIELD_PREP(IRDMAQPC_VLANTAG, tcp->vlan_tag) |
993  			      FIELD_PREP(IRDMAQPC_ARPIDX, tcp->arp_idx));
994  		qw7 |= FIELD_PREP(IRDMAQPC_FLOWLABEL, tcp->flow_label) |
995  		       FIELD_PREP(IRDMAQPC_WSCALE, tcp->wscale) |
996  		       FIELD_PREP(IRDMAQPC_IGNORE_TCP_OPT,
997  				  tcp->ignore_tcp_opt) |
998  		       FIELD_PREP(IRDMAQPC_IGNORE_TCP_UNS_OPT,
999  				  tcp->ignore_tcp_uns_opt) |
1000  		       FIELD_PREP(IRDMAQPC_TCPSTATE, tcp->tcp_state) |
1001  		       FIELD_PREP(IRDMAQPC_RCVSCALE, tcp->rcv_wscale) |
1002  		       FIELD_PREP(IRDMAQPC_SNDSCALE, tcp->snd_wscale);
1003  		set_64bit_val(qp_ctx, 72,
1004  			      FIELD_PREP(IRDMAQPC_TIMESTAMP_RECENT, tcp->time_stamp_recent) |
1005  			      FIELD_PREP(IRDMAQPC_TIMESTAMP_AGE, tcp->time_stamp_age));
1006  		set_64bit_val(qp_ctx, 80,
1007  			      FIELD_PREP(IRDMAQPC_SNDNXT, tcp->snd_nxt) |
1008  			      FIELD_PREP(IRDMAQPC_SNDWND, tcp->snd_wnd));
1009  		set_64bit_val(qp_ctx, 88,
1010  			      FIELD_PREP(IRDMAQPC_RCVNXT, tcp->rcv_nxt) |
1011  			      FIELD_PREP(IRDMAQPC_RCVWND, tcp->rcv_wnd));
1012  		set_64bit_val(qp_ctx, 96,
1013  			      FIELD_PREP(IRDMAQPC_SNDMAX, tcp->snd_max) |
1014  			      FIELD_PREP(IRDMAQPC_SNDUNA, tcp->snd_una));
1015  		set_64bit_val(qp_ctx, 104,
1016  			      FIELD_PREP(IRDMAQPC_SRTT, tcp->srtt) |
1017  			      FIELD_PREP(IRDMAQPC_RTTVAR, tcp->rtt_var));
1018  		set_64bit_val(qp_ctx, 112,
1019  			      FIELD_PREP(IRDMAQPC_SSTHRESH, tcp->ss_thresh) |
1020  			      FIELD_PREP(IRDMAQPC_CWND, tcp->cwnd));
1021  		set_64bit_val(qp_ctx, 120,
1022  			      FIELD_PREP(IRDMAQPC_SNDWL1, tcp->snd_wl1) |
1023  			      FIELD_PREP(IRDMAQPC_SNDWL2, tcp->snd_wl2));
1024  		qw16 |= FIELD_PREP(IRDMAQPC_MAXSNDWND, tcp->max_snd_window) |
1025  			FIELD_PREP(IRDMAQPC_REXMIT_THRESH, tcp->rexmit_thresh);
1026  		set_64bit_val(qp_ctx, 184,
1027  			      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR3, tcp->local_ipaddr[3]) |
1028  			      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR2, tcp->local_ipaddr[2]));
1029  		set_64bit_val(qp_ctx, 192,
1030  			      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR1, tcp->local_ipaddr[1]) |
1031  			      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR0, tcp->local_ipaddr[0]));
1032  		set_64bit_val(qp_ctx, 200,
1033  			      FIELD_PREP(IRDMAQPC_THIGH, iw->t_high) |
1034  			      FIELD_PREP(IRDMAQPC_TLOW, iw->t_low));
1035  		set_64bit_val(qp_ctx, 208,
1036  			      FIELD_PREP(IRDMAQPC_REMENDPOINTIDX, info->rem_endpoint_idx));
1037  	}
1038  
1039  	set_64bit_val(qp_ctx, 0, qw0);
1040  	set_64bit_val(qp_ctx, 24, qw3);
1041  	set_64bit_val(qp_ctx, 56, qw7);
1042  	set_64bit_val(qp_ctx, 128, qw16);
1043  
1044  	print_hex_dump_debug("WQE: QP_HOST CTX", DUMP_PREFIX_OFFSET, 16, 8,
1045  			     qp_ctx, IRDMA_QP_CTX_SIZE, false);
1046  }
1047  
1048  /**
1049   * irdma_sc_alloc_stag - mr stag alloc
1050   * @dev: sc device struct
1051   * @info: stag info
1052   * @scratch: u64 saved to be used during cqp completion
1053   * @post_sq: flag for cqp db to ring
1054   */
irdma_sc_alloc_stag(struct irdma_sc_dev * dev,struct irdma_allocate_stag_info * info,u64 scratch,bool post_sq)1055  static int irdma_sc_alloc_stag(struct irdma_sc_dev *dev,
1056  			       struct irdma_allocate_stag_info *info,
1057  			       u64 scratch, bool post_sq)
1058  {
1059  	__le64 *wqe;
1060  	struct irdma_sc_cqp *cqp;
1061  	u64 hdr;
1062  	enum irdma_page_size page_size;
1063  
1064  	if (!info->total_len && !info->all_memory)
1065  		return -EINVAL;
1066  
1067  	if (info->page_size == 0x40000000)
1068  		page_size = IRDMA_PAGE_SIZE_1G;
1069  	else if (info->page_size == 0x200000)
1070  		page_size = IRDMA_PAGE_SIZE_2M;
1071  	else
1072  		page_size = IRDMA_PAGE_SIZE_4K;
1073  
1074  	cqp = dev->cqp;
1075  	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
1076  	if (!wqe)
1077  		return -ENOMEM;
1078  
1079  	set_64bit_val(wqe, 8,
1080  		      FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID) |
1081  		      FIELD_PREP(IRDMA_CQPSQ_STAG_STAGLEN, info->total_len));
1082  	set_64bit_val(wqe, 16,
1083  		      FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
1084  	set_64bit_val(wqe, 40,
1085  		      FIELD_PREP(IRDMA_CQPSQ_STAG_HMCFNIDX, info->hmc_fcn_index));
1086  
1087  	if (info->chunk_size)
1088  		set_64bit_val(wqe, 48,
1089  			      FIELD_PREP(IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX, info->first_pm_pbl_idx));
1090  
1091  	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_ALLOC_STAG) |
1092  	      FIELD_PREP(IRDMA_CQPSQ_STAG_MR, 1) |
1093  	      FIELD_PREP(IRDMA_CQPSQ_STAG_ARIGHTS, info->access_rights) |
1094  	      FIELD_PREP(IRDMA_CQPSQ_STAG_LPBLSIZE, info->chunk_size) |
1095  	      FIELD_PREP(IRDMA_CQPSQ_STAG_HPAGESIZE, page_size) |
1096  	      FIELD_PREP(IRDMA_CQPSQ_STAG_REMACCENABLED, info->remote_access) |
1097  	      FIELD_PREP(IRDMA_CQPSQ_STAG_USEHMCFNIDX, info->use_hmc_fcn_index) |
1098  	      FIELD_PREP(IRDMA_CQPSQ_STAG_USEPFRID, info->use_pf_rid) |
1099  	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
1100  	dma_wmb(); /* make sure WQE is written before valid bit is set */
1101  
1102  	set_64bit_val(wqe, 24, hdr);
1103  
1104  	print_hex_dump_debug("WQE: ALLOC_STAG WQE", DUMP_PREFIX_OFFSET, 16, 8,
1105  			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
1106  	if (post_sq)
1107  		irdma_sc_cqp_post_sq(cqp);
1108  
1109  	return 0;
1110  }
1111  
1112  /**
1113   * irdma_sc_mr_reg_non_shared - non-shared mr registration
1114   * @dev: sc device struct
1115   * @info: mr info
1116   * @scratch: u64 saved to be used during cqp completion
1117   * @post_sq: flag for cqp db to ring
1118   */
irdma_sc_mr_reg_non_shared(struct irdma_sc_dev * dev,struct irdma_reg_ns_stag_info * info,u64 scratch,bool post_sq)1119  static int irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
1120  				      struct irdma_reg_ns_stag_info *info,
1121  				      u64 scratch, bool post_sq)
1122  {
1123  	__le64 *wqe;
1124  	u64 fbo;
1125  	struct irdma_sc_cqp *cqp;
1126  	u64 hdr;
1127  	u32 pble_obj_cnt;
1128  	bool remote_access;
1129  	u8 addr_type;
1130  	enum irdma_page_size page_size;
1131  
1132  	if (!info->total_len && !info->all_memory)
1133  		return -EINVAL;
1134  
1135  	if (info->page_size == 0x40000000)
1136  		page_size = IRDMA_PAGE_SIZE_1G;
1137  	else if (info->page_size == 0x200000)
1138  		page_size = IRDMA_PAGE_SIZE_2M;
1139  	else if (info->page_size == 0x1000)
1140  		page_size = IRDMA_PAGE_SIZE_4K;
1141  	else
1142  		return -EINVAL;
1143  
1144  	if (info->access_rights & (IRDMA_ACCESS_FLAGS_REMOTEREAD_ONLY |
1145  				   IRDMA_ACCESS_FLAGS_REMOTEWRITE_ONLY))
1146  		remote_access = true;
1147  	else
1148  		remote_access = false;
1149  
1150  	pble_obj_cnt = dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
1151  	if (info->chunk_size && info->first_pm_pbl_index >= pble_obj_cnt)
1152  		return -EINVAL;
1153  
1154  	cqp = dev->cqp;
1155  	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
1156  	if (!wqe)
1157  		return -ENOMEM;
1158  	fbo = info->va & (info->page_size - 1);
1159  
1160  	set_64bit_val(wqe, 0,
1161  		      (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED ?
1162  		      info->va : fbo));
1163  	set_64bit_val(wqe, 8,
1164  		      FIELD_PREP(IRDMA_CQPSQ_STAG_STAGLEN, info->total_len) |
1165  		      FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
1166  	set_64bit_val(wqe, 16,
1167  		      FIELD_PREP(IRDMA_CQPSQ_STAG_KEY, info->stag_key) |
1168  		      FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
1169  	if (!info->chunk_size) {
1170  		set_64bit_val(wqe, 32, info->reg_addr_pa);
1171  		set_64bit_val(wqe, 48, 0);
1172  	} else {
1173  		set_64bit_val(wqe, 32, 0);
1174  		set_64bit_val(wqe, 48,
1175  			      FIELD_PREP(IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX, info->first_pm_pbl_index));
1176  	}
1177  	set_64bit_val(wqe, 40, info->hmc_fcn_index);
1178  	set_64bit_val(wqe, 56, 0);
1179  
1180  	addr_type = (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED) ? 1 : 0;
1181  	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_REG_MR) |
1182  	      FIELD_PREP(IRDMA_CQPSQ_STAG_MR, 1) |
1183  	      FIELD_PREP(IRDMA_CQPSQ_STAG_LPBLSIZE, info->chunk_size) |
1184  	      FIELD_PREP(IRDMA_CQPSQ_STAG_HPAGESIZE, page_size) |
1185  	      FIELD_PREP(IRDMA_CQPSQ_STAG_ARIGHTS, info->access_rights) |
1186  	      FIELD_PREP(IRDMA_CQPSQ_STAG_REMACCENABLED, remote_access) |
1187  	      FIELD_PREP(IRDMA_CQPSQ_STAG_VABASEDTO, addr_type) |
1188  	      FIELD_PREP(IRDMA_CQPSQ_STAG_USEHMCFNIDX, info->use_hmc_fcn_index) |
1189  	      FIELD_PREP(IRDMA_CQPSQ_STAG_USEPFRID, info->use_pf_rid) |
1190  	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
1191  	dma_wmb(); /* make sure WQE is written before valid bit is set */
1192  
1193  	set_64bit_val(wqe, 24, hdr);
1194  
1195  	print_hex_dump_debug("WQE: MR_REG_NS WQE", DUMP_PREFIX_OFFSET, 16, 8,
1196  			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
1197  	if (post_sq)
1198  		irdma_sc_cqp_post_sq(cqp);
1199  
1200  	return 0;
1201  }
1202  
1203  /**
1204   * irdma_sc_dealloc_stag - deallocate stag
1205   * @dev: sc device struct
1206   * @info: dealloc stag info
1207   * @scratch: u64 saved to be used during cqp completion
1208   * @post_sq: flag for cqp db to ring
1209   */
irdma_sc_dealloc_stag(struct irdma_sc_dev * dev,struct irdma_dealloc_stag_info * info,u64 scratch,bool post_sq)1210  static int irdma_sc_dealloc_stag(struct irdma_sc_dev *dev,
1211  				 struct irdma_dealloc_stag_info *info,
1212  				 u64 scratch, bool post_sq)
1213  {
1214  	u64 hdr;
1215  	__le64 *wqe;
1216  	struct irdma_sc_cqp *cqp;
1217  
1218  	cqp = dev->cqp;
1219  	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
1220  	if (!wqe)
1221  		return -ENOMEM;
1222  
1223  	set_64bit_val(wqe, 8,
1224  		      FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
1225  	set_64bit_val(wqe, 16,
1226  		      FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
1227  
1228  	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DEALLOC_STAG) |
1229  	      FIELD_PREP(IRDMA_CQPSQ_STAG_MR, info->mr) |
1230  	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
1231  	dma_wmb(); /* make sure WQE is written before valid bit is set */
1232  
1233  	set_64bit_val(wqe, 24, hdr);
1234  
1235  	print_hex_dump_debug("WQE: DEALLOC_STAG WQE", DUMP_PREFIX_OFFSET, 16,
1236  			     8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
1237  	if (post_sq)
1238  		irdma_sc_cqp_post_sq(cqp);
1239  
1240  	return 0;
1241  }
1242  
1243  /**
1244   * irdma_sc_mw_alloc - mw allocate
1245   * @dev: sc device struct
1246   * @info: memory window allocation information
1247   * @scratch: u64 saved to be used during cqp completion
1248   * @post_sq: flag for cqp db to ring
1249   */
irdma_sc_mw_alloc(struct irdma_sc_dev * dev,struct irdma_mw_alloc_info * info,u64 scratch,bool post_sq)1250  static int irdma_sc_mw_alloc(struct irdma_sc_dev *dev,
1251  			     struct irdma_mw_alloc_info *info, u64 scratch,
1252  			     bool post_sq)
1253  {
1254  	u64 hdr;
1255  	struct irdma_sc_cqp *cqp;
1256  	__le64 *wqe;
1257  
1258  	cqp = dev->cqp;
1259  	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
1260  	if (!wqe)
1261  		return -ENOMEM;
1262  
1263  	set_64bit_val(wqe, 8,
1264  		      FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
1265  	set_64bit_val(wqe, 16,
1266  		      FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->mw_stag_index));
1267  
1268  	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_ALLOC_STAG) |
1269  	      FIELD_PREP(IRDMA_CQPSQ_STAG_MWTYPE, info->mw_wide) |
1270  	      FIELD_PREP(IRDMA_CQPSQ_STAG_MW1_BIND_DONT_VLDT_KEY,
1271  			 info->mw1_bind_dont_vldt_key) |
1272  	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
1273  	dma_wmb(); /* make sure WQE is written before valid bit is set */
1274  
1275  	set_64bit_val(wqe, 24, hdr);
1276  
1277  	print_hex_dump_debug("WQE: MW_ALLOC WQE", DUMP_PREFIX_OFFSET, 16, 8,
1278  			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
1279  	if (post_sq)
1280  		irdma_sc_cqp_post_sq(cqp);
1281  
1282  	return 0;
1283  }
1284  
1285  /**
1286   * irdma_sc_mr_fast_register - Posts RDMA fast register mr WR to iwarp qp
1287   * @qp: sc qp struct
1288   * @info: fast mr info
1289   * @post_sq: flag for cqp db to ring
1290   */
irdma_sc_mr_fast_register(struct irdma_sc_qp * qp,struct irdma_fast_reg_stag_info * info,bool post_sq)1291  int irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
1292  			      struct irdma_fast_reg_stag_info *info,
1293  			      bool post_sq)
1294  {
1295  	u64 temp, hdr;
1296  	__le64 *wqe;
1297  	u32 wqe_idx;
1298  	enum irdma_page_size page_size;
1299  	struct irdma_post_sq_info sq_info = {};
1300  
1301  	if (info->page_size == 0x40000000)
1302  		page_size = IRDMA_PAGE_SIZE_1G;
1303  	else if (info->page_size == 0x200000)
1304  		page_size = IRDMA_PAGE_SIZE_2M;
1305  	else
1306  		page_size = IRDMA_PAGE_SIZE_4K;
1307  
1308  	sq_info.wr_id = info->wr_id;
1309  	sq_info.signaled = info->signaled;
1310  
1311  	wqe = irdma_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx,
1312  					 IRDMA_QP_WQE_MIN_QUANTA, 0, &sq_info);
1313  	if (!wqe)
1314  		return -ENOMEM;
1315  
1316  	irdma_clr_wqes(&qp->qp_uk, wqe_idx);
1317  
1318  	ibdev_dbg(to_ibdev(qp->dev),
1319  		  "MR: wr_id[%llxh] wqe_idx[%04d] location[%p]\n",
1320  		  info->wr_id, wqe_idx,
1321  		  &qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid);
1322  
1323  	temp = (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED) ?
1324  		(uintptr_t)info->va : info->fbo;
1325  	set_64bit_val(wqe, 0, temp);
1326  
1327  	temp = FIELD_GET(IRDMAQPSQ_FIRSTPMPBLIDXHI,
1328  			 info->first_pm_pbl_index >> 16);
1329  	set_64bit_val(wqe, 8,
1330  		      FIELD_PREP(IRDMAQPSQ_FIRSTPMPBLIDXHI, temp) |
1331  		      FIELD_PREP(IRDMAQPSQ_PBLADDR >> IRDMA_HW_PAGE_SHIFT, info->reg_addr_pa));
1332  	set_64bit_val(wqe, 16,
1333  		      info->total_len |
1334  		      FIELD_PREP(IRDMAQPSQ_FIRSTPMPBLIDXLO, info->first_pm_pbl_index));
1335  
1336  	hdr = FIELD_PREP(IRDMAQPSQ_STAGKEY, info->stag_key) |
1337  	      FIELD_PREP(IRDMAQPSQ_STAGINDEX, info->stag_idx) |
1338  	      FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_FAST_REGISTER) |
1339  	      FIELD_PREP(IRDMAQPSQ_LPBLSIZE, info->chunk_size) |
1340  	      FIELD_PREP(IRDMAQPSQ_HPAGESIZE, page_size) |
1341  	      FIELD_PREP(IRDMAQPSQ_STAGRIGHTS, info->access_rights) |
1342  	      FIELD_PREP(IRDMAQPSQ_VABASEDTO, info->addr_type) |
1343  	      FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
1344  	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
1345  	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
1346  	      FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
1347  	dma_wmb(); /* make sure WQE is written before valid bit is set */
1348  
1349  	set_64bit_val(wqe, 24, hdr);
1350  
1351  	print_hex_dump_debug("WQE: FAST_REG WQE", DUMP_PREFIX_OFFSET, 16, 8,
1352  			     wqe, IRDMA_QP_WQE_MIN_SIZE, false);
1353  
1354  	if (post_sq)
1355  		irdma_uk_qp_post_wr(&qp->qp_uk);
1356  
1357  	return 0;
1358  }
1359  
1360  /**
1361   * irdma_sc_gen_rts_ae - request AE generated after RTS
1362   * @qp: sc qp struct
1363   */
irdma_sc_gen_rts_ae(struct irdma_sc_qp * qp)1364  static void irdma_sc_gen_rts_ae(struct irdma_sc_qp *qp)
1365  {
1366  	__le64 *wqe;
1367  	u64 hdr;
1368  	struct irdma_qp_uk *qp_uk;
1369  
1370  	qp_uk = &qp->qp_uk;
1371  
1372  	wqe = qp_uk->sq_base[1].elem;
1373  
1374  	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
1375  	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, 1) |
1376  	      FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
1377  	dma_wmb(); /* make sure WQE is written before valid bit is set */
1378  
1379  	set_64bit_val(wqe, 24, hdr);
1380  	print_hex_dump_debug("QP: NOP W/LOCAL FENCE WQE", DUMP_PREFIX_OFFSET,
1381  			     16, 8, wqe, IRDMA_QP_WQE_MIN_SIZE, false);
1382  
1383  	wqe = qp_uk->sq_base[2].elem;
1384  	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_GEN_RTS_AE) |
1385  	      FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
1386  	dma_wmb(); /* make sure WQE is written before valid bit is set */
1387  
1388  	set_64bit_val(wqe, 24, hdr);
1389  	print_hex_dump_debug("QP: CONN EST WQE", DUMP_PREFIX_OFFSET, 16, 8,
1390  			     wqe, IRDMA_QP_WQE_MIN_SIZE, false);
1391  }
1392  
1393  /**
1394   * irdma_sc_send_lsmm - send last streaming mode message
1395   * @qp: sc qp struct
1396   * @lsmm_buf: buffer with lsmm message
1397   * @size: size of lsmm buffer
1398   * @stag: stag of lsmm buffer
1399   */
irdma_sc_send_lsmm(struct irdma_sc_qp * qp,void * lsmm_buf,u32 size,irdma_stag stag)1400  void irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size,
1401  			irdma_stag stag)
1402  {
1403  	__le64 *wqe;
1404  	u64 hdr;
1405  	struct irdma_qp_uk *qp_uk;
1406  
1407  	qp_uk = &qp->qp_uk;
1408  	wqe = qp_uk->sq_base->elem;
1409  
1410  	set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf);
1411  	if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
1412  		set_64bit_val(wqe, 8,
1413  			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, size) |
1414  			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, stag));
1415  	} else {
1416  		set_64bit_val(wqe, 8,
1417  			      FIELD_PREP(IRDMAQPSQ_FRAG_LEN, size) |
1418  			      FIELD_PREP(IRDMAQPSQ_FRAG_STAG, stag) |
1419  			      FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity));
1420  	}
1421  	set_64bit_val(wqe, 16, 0);
1422  
1423  	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_SEND) |
1424  	      FIELD_PREP(IRDMAQPSQ_STREAMMODE, 1) |
1425  	      FIELD_PREP(IRDMAQPSQ_WAITFORRCVPDU, 1) |
1426  	      FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
1427  	dma_wmb(); /* make sure WQE is written before valid bit is set */
1428  
1429  	set_64bit_val(wqe, 24, hdr);
1430  
1431  	print_hex_dump_debug("WQE: SEND_LSMM WQE", DUMP_PREFIX_OFFSET, 16, 8,
1432  			     wqe, IRDMA_QP_WQE_MIN_SIZE, false);
1433  
1434  	if (qp->dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE)
1435  		irdma_sc_gen_rts_ae(qp);
1436  }
1437  
1438  /**
1439   * irdma_sc_send_rtt - send last read0 or write0
1440   * @qp: sc qp struct
1441   * @read: Do read0 or write0
1442   */
irdma_sc_send_rtt(struct irdma_sc_qp * qp,bool read)1443  void irdma_sc_send_rtt(struct irdma_sc_qp *qp, bool read)
1444  {
1445  	__le64 *wqe;
1446  	u64 hdr;
1447  	struct irdma_qp_uk *qp_uk;
1448  
1449  	qp_uk = &qp->qp_uk;
1450  	wqe = qp_uk->sq_base->elem;
1451  
1452  	set_64bit_val(wqe, 0, 0);
1453  	set_64bit_val(wqe, 16, 0);
1454  	if (read) {
1455  		if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
1456  			set_64bit_val(wqe, 8,
1457  				      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, 0xabcd));
1458  		} else {
1459  			set_64bit_val(wqe, 8,
1460  				      (u64)0xabcd | FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity));
1461  		}
1462  		hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, 0x1234) |
1463  		      FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_READ) |
1464  		      FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
1465  
1466  	} else {
1467  		if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
1468  			set_64bit_val(wqe, 8, 0);
1469  		} else {
1470  			set_64bit_val(wqe, 8,
1471  				      FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity));
1472  		}
1473  		hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_WRITE) |
1474  		      FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
1475  	}
1476  
1477  	dma_wmb(); /* make sure WQE is written before valid bit is set */
1478  
1479  	set_64bit_val(wqe, 24, hdr);
1480  
1481  	print_hex_dump_debug("WQE: RTR WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe,
1482  			     IRDMA_QP_WQE_MIN_SIZE, false);
1483  
1484  	if (qp->dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE)
1485  		irdma_sc_gen_rts_ae(qp);
1486  }
1487  
1488  /**
1489   * irdma_iwarp_opcode - determine if incoming is rdma layer
1490   * @info: aeq info for the packet
1491   * @pkt: packet for error
1492   */
irdma_iwarp_opcode(struct irdma_aeqe_info * info,u8 * pkt)1493  static u32 irdma_iwarp_opcode(struct irdma_aeqe_info *info, u8 *pkt)
1494  {
1495  	__be16 *mpa;
1496  	u32 opcode = 0xffffffff;
1497  
1498  	if (info->q2_data_written) {
1499  		mpa = (__be16 *)pkt;
1500  		opcode = ntohs(mpa[1]) & 0xf;
1501  	}
1502  
1503  	return opcode;
1504  }
1505  
1506  /**
1507   * irdma_locate_mpa - return pointer to mpa in the pkt
1508   * @pkt: packet with data
1509   */
irdma_locate_mpa(u8 * pkt)1510  static u8 *irdma_locate_mpa(u8 *pkt)
1511  {
1512  	/* skip over ethernet header */
1513  	pkt += IRDMA_MAC_HLEN;
1514  
1515  	/* Skip over IP and TCP headers */
1516  	pkt += 4 * (pkt[0] & 0x0f);
1517  	pkt += 4 * ((pkt[12] >> 4) & 0x0f);
1518  
1519  	return pkt;
1520  }
1521  
1522  /**
1523   * irdma_bld_termhdr_ctrl - setup terminate hdr control fields
1524   * @qp: sc qp ptr for pkt
1525   * @hdr: term hdr
1526   * @opcode: flush opcode for termhdr
1527   * @layer_etype: error layer + error type
1528   * @err: error cod ein the header
1529   */
irdma_bld_termhdr_ctrl(struct irdma_sc_qp * qp,struct irdma_terminate_hdr * hdr,enum irdma_flush_opcode opcode,u8 layer_etype,u8 err)1530  static void irdma_bld_termhdr_ctrl(struct irdma_sc_qp *qp,
1531  				   struct irdma_terminate_hdr *hdr,
1532  				   enum irdma_flush_opcode opcode,
1533  				   u8 layer_etype, u8 err)
1534  {
1535  	qp->flush_code = opcode;
1536  	hdr->layer_etype = layer_etype;
1537  	hdr->error_code = err;
1538  }
1539  
1540  /**
1541   * irdma_bld_termhdr_ddp_rdma - setup ddp and rdma hdrs in terminate hdr
1542   * @pkt: ptr to mpa in offending pkt
1543   * @hdr: term hdr
1544   * @copy_len: offending pkt length to be copied to term hdr
1545   * @is_tagged: DDP tagged or untagged
1546   */
irdma_bld_termhdr_ddp_rdma(u8 * pkt,struct irdma_terminate_hdr * hdr,int * copy_len,u8 * is_tagged)1547  static void irdma_bld_termhdr_ddp_rdma(u8 *pkt, struct irdma_terminate_hdr *hdr,
1548  				       int *copy_len, u8 *is_tagged)
1549  {
1550  	u16 ddp_seg_len;
1551  
1552  	ddp_seg_len = ntohs(*(__be16 *)pkt);
1553  	if (ddp_seg_len) {
1554  		*copy_len = 2;
1555  		hdr->hdrct = DDP_LEN_FLAG;
1556  		if (pkt[2] & 0x80) {
1557  			*is_tagged = 1;
1558  			if (ddp_seg_len >= TERM_DDP_LEN_TAGGED) {
1559  				*copy_len += TERM_DDP_LEN_TAGGED;
1560  				hdr->hdrct |= DDP_HDR_FLAG;
1561  			}
1562  		} else {
1563  			if (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED) {
1564  				*copy_len += TERM_DDP_LEN_UNTAGGED;
1565  				hdr->hdrct |= DDP_HDR_FLAG;
1566  			}
1567  			if (ddp_seg_len >= (TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN) &&
1568  			    ((pkt[3] & RDMA_OPCODE_M) == RDMA_READ_REQ_OPCODE)) {
1569  				*copy_len += TERM_RDMA_LEN;
1570  				hdr->hdrct |= RDMA_HDR_FLAG;
1571  			}
1572  		}
1573  	}
1574  }
1575  
1576  /**
1577   * irdma_bld_terminate_hdr - build terminate message header
1578   * @qp: qp associated with received terminate AE
1579   * @info: the struct contiaing AE information
1580   */
irdma_bld_terminate_hdr(struct irdma_sc_qp * qp,struct irdma_aeqe_info * info)1581  static int irdma_bld_terminate_hdr(struct irdma_sc_qp *qp,
1582  				   struct irdma_aeqe_info *info)
1583  {
1584  	u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
1585  	int copy_len = 0;
1586  	u8 is_tagged = 0;
1587  	u32 opcode;
1588  	struct irdma_terminate_hdr *termhdr;
1589  
1590  	termhdr = (struct irdma_terminate_hdr *)qp->q2_buf;
1591  	memset(termhdr, 0, Q2_BAD_FRAME_OFFSET);
1592  
1593  	if (info->q2_data_written) {
1594  		pkt = irdma_locate_mpa(pkt);
1595  		irdma_bld_termhdr_ddp_rdma(pkt, termhdr, &copy_len, &is_tagged);
1596  	}
1597  
1598  	opcode = irdma_iwarp_opcode(info, pkt);
1599  	qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
1600  	qp->sq_flush_code = info->sq;
1601  	qp->rq_flush_code = info->rq;
1602  
1603  	switch (info->ae_id) {
1604  	case IRDMA_AE_AMP_UNALLOCATED_STAG:
1605  		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
1606  		if (opcode == IRDMA_OP_TYPE_RDMA_WRITE)
1607  			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR,
1608  					       (LAYER_DDP << 4) | DDP_TAGGED_BUF,
1609  					       DDP_TAGGED_INV_STAG);
1610  		else
1611  			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
1612  					       (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
1613  					       RDMAP_INV_STAG);
1614  		break;
1615  	case IRDMA_AE_AMP_BOUNDS_VIOLATION:
1616  		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
1617  		if (info->q2_data_written)
1618  			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR,
1619  					       (LAYER_DDP << 4) | DDP_TAGGED_BUF,
1620  					       DDP_TAGGED_BOUNDS);
1621  		else
1622  			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
1623  					       (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
1624  					       RDMAP_INV_BOUNDS);
1625  		break;
1626  	case IRDMA_AE_AMP_BAD_PD:
1627  		switch (opcode) {
1628  		case IRDMA_OP_TYPE_RDMA_WRITE:
1629  			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR,
1630  					       (LAYER_DDP << 4) | DDP_TAGGED_BUF,
1631  					       DDP_TAGGED_UNASSOC_STAG);
1632  			break;
1633  		case IRDMA_OP_TYPE_SEND_INV:
1634  		case IRDMA_OP_TYPE_SEND_SOL_INV:
1635  			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
1636  					       (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
1637  					       RDMAP_CANT_INV_STAG);
1638  			break;
1639  		default:
1640  			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
1641  					       (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
1642  					       RDMAP_UNASSOC_STAG);
1643  		}
1644  		break;
1645  	case IRDMA_AE_AMP_INVALID_STAG:
1646  		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
1647  		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
1648  				       (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
1649  				       RDMAP_INV_STAG);
1650  		break;
1651  	case IRDMA_AE_AMP_BAD_QP:
1652  		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_QP_OP_ERR,
1653  				       (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
1654  				       DDP_UNTAGGED_INV_QN);
1655  		break;
1656  	case IRDMA_AE_AMP_BAD_STAG_KEY:
1657  	case IRDMA_AE_AMP_BAD_STAG_INDEX:
1658  		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
1659  		switch (opcode) {
1660  		case IRDMA_OP_TYPE_SEND_INV:
1661  		case IRDMA_OP_TYPE_SEND_SOL_INV:
1662  			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_OP_ERR,
1663  					       (LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
1664  					       RDMAP_CANT_INV_STAG);
1665  			break;
1666  		default:
1667  			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
1668  					       (LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
1669  					       RDMAP_INV_STAG);
1670  		}
1671  		break;
1672  	case IRDMA_AE_AMP_RIGHTS_VIOLATION:
1673  	case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
1674  	case IRDMA_AE_PRIV_OPERATION_DENIED:
1675  		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
1676  		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
1677  				       (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
1678  				       RDMAP_ACCESS);
1679  		break;
1680  	case IRDMA_AE_AMP_TO_WRAP:
1681  		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
1682  		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
1683  				       (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
1684  				       RDMAP_TO_WRAP);
1685  		break;
1686  	case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
1687  		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
1688  				       (LAYER_MPA << 4) | DDP_LLP, MPA_CRC);
1689  		break;
1690  	case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
1691  		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_LEN_ERR,
1692  				       (LAYER_DDP << 4) | DDP_CATASTROPHIC,
1693  				       DDP_CATASTROPHIC_LOCAL);
1694  		break;
1695  	case IRDMA_AE_LCE_QP_CATASTROPHIC:
1696  	case IRDMA_AE_DDP_NO_L_BIT:
1697  		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_FATAL_ERR,
1698  				       (LAYER_DDP << 4) | DDP_CATASTROPHIC,
1699  				       DDP_CATASTROPHIC_LOCAL);
1700  		break;
1701  	case IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN:
1702  		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
1703  				       (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
1704  				       DDP_UNTAGGED_INV_MSN_RANGE);
1705  		break;
1706  	case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
1707  		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
1708  		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_LEN_ERR,
1709  				       (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
1710  				       DDP_UNTAGGED_INV_TOO_LONG);
1711  		break;
1712  	case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION:
1713  		if (is_tagged)
1714  			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
1715  					       (LAYER_DDP << 4) | DDP_TAGGED_BUF,
1716  					       DDP_TAGGED_INV_DDP_VER);
1717  		else
1718  			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
1719  					       (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
1720  					       DDP_UNTAGGED_INV_DDP_VER);
1721  		break;
1722  	case IRDMA_AE_DDP_UBE_INVALID_MO:
1723  		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
1724  				       (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
1725  				       DDP_UNTAGGED_INV_MO);
1726  		break;
1727  	case IRDMA_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
1728  		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_OP_ERR,
1729  				       (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
1730  				       DDP_UNTAGGED_INV_MSN_NO_BUF);
1731  		break;
1732  	case IRDMA_AE_DDP_UBE_INVALID_QN:
1733  		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
1734  				       (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
1735  				       DDP_UNTAGGED_INV_QN);
1736  		break;
1737  	case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
1738  		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
1739  				       (LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
1740  				       RDMAP_INV_RDMAP_VER);
1741  		break;
1742  	default:
1743  		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_FATAL_ERR,
1744  				       (LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
1745  				       RDMAP_UNSPECIFIED);
1746  		break;
1747  	}
1748  
1749  	if (copy_len)
1750  		memcpy(termhdr + 1, pkt, copy_len);
1751  
1752  	return sizeof(struct irdma_terminate_hdr) + copy_len;
1753  }
1754  
1755  /**
1756   * irdma_terminate_send_fin() - Send fin for terminate message
1757   * @qp: qp associated with received terminate AE
1758   */
irdma_terminate_send_fin(struct irdma_sc_qp * qp)1759  void irdma_terminate_send_fin(struct irdma_sc_qp *qp)
1760  {
1761  	irdma_term_modify_qp(qp, IRDMA_QP_STATE_TERMINATE,
1762  			     IRDMAQP_TERM_SEND_FIN_ONLY, 0);
1763  }
1764  
1765  /**
1766   * irdma_terminate_connection() - Bad AE and send terminate to remote QP
1767   * @qp: qp associated with received terminate AE
1768   * @info: the struct contiaing AE information
1769   */
irdma_terminate_connection(struct irdma_sc_qp * qp,struct irdma_aeqe_info * info)1770  void irdma_terminate_connection(struct irdma_sc_qp *qp,
1771  				struct irdma_aeqe_info *info)
1772  {
1773  	u8 termlen = 0;
1774  
1775  	if (qp->term_flags & IRDMA_TERM_SENT)
1776  		return;
1777  
1778  	termlen = irdma_bld_terminate_hdr(qp, info);
1779  	irdma_terminate_start_timer(qp);
1780  	qp->term_flags |= IRDMA_TERM_SENT;
1781  	irdma_term_modify_qp(qp, IRDMA_QP_STATE_TERMINATE,
1782  			     IRDMAQP_TERM_SEND_TERM_ONLY, termlen);
1783  }
1784  
1785  /**
1786   * irdma_terminate_received - handle terminate received AE
1787   * @qp: qp associated with received terminate AE
1788   * @info: the struct contiaing AE information
1789   */
irdma_terminate_received(struct irdma_sc_qp * qp,struct irdma_aeqe_info * info)1790  void irdma_terminate_received(struct irdma_sc_qp *qp,
1791  			      struct irdma_aeqe_info *info)
1792  {
1793  	u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
1794  	__be32 *mpa;
1795  	u8 ddp_ctl;
1796  	u8 rdma_ctl;
1797  	u16 aeq_id = 0;
1798  	struct irdma_terminate_hdr *termhdr;
1799  
1800  	mpa = (__be32 *)irdma_locate_mpa(pkt);
1801  	if (info->q2_data_written) {
1802  		/* did not validate the frame - do it now */
1803  		ddp_ctl = (ntohl(mpa[0]) >> 8) & 0xff;
1804  		rdma_ctl = ntohl(mpa[0]) & 0xff;
1805  		if ((ddp_ctl & 0xc0) != 0x40)
1806  			aeq_id = IRDMA_AE_LCE_QP_CATASTROPHIC;
1807  		else if ((ddp_ctl & 0x03) != 1)
1808  			aeq_id = IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION;
1809  		else if (ntohl(mpa[2]) != 2)
1810  			aeq_id = IRDMA_AE_DDP_UBE_INVALID_QN;
1811  		else if (ntohl(mpa[3]) != 1)
1812  			aeq_id = IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN;
1813  		else if (ntohl(mpa[4]) != 0)
1814  			aeq_id = IRDMA_AE_DDP_UBE_INVALID_MO;
1815  		else if ((rdma_ctl & 0xc0) != 0x40)
1816  			aeq_id = IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION;
1817  
1818  		info->ae_id = aeq_id;
1819  		if (info->ae_id) {
1820  			/* Bad terminate recvd - send back a terminate */
1821  			irdma_terminate_connection(qp, info);
1822  			return;
1823  		}
1824  	}
1825  
1826  	qp->term_flags |= IRDMA_TERM_RCVD;
1827  	qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
1828  	termhdr = (struct irdma_terminate_hdr *)&mpa[5];
1829  	if (termhdr->layer_etype == RDMAP_REMOTE_PROT ||
1830  	    termhdr->layer_etype == RDMAP_REMOTE_OP) {
1831  		irdma_terminate_done(qp, 0);
1832  	} else {
1833  		irdma_terminate_start_timer(qp);
1834  		irdma_terminate_send_fin(qp);
1835  	}
1836  }
1837  
irdma_null_ws_add(struct irdma_sc_vsi * vsi,u8 user_pri)1838  static int irdma_null_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
1839  {
1840  	return 0;
1841  }
1842  
irdma_null_ws_remove(struct irdma_sc_vsi * vsi,u8 user_pri)1843  static void irdma_null_ws_remove(struct irdma_sc_vsi *vsi, u8 user_pri)
1844  {
1845  	/* do nothing */
1846  }
1847  
irdma_null_ws_reset(struct irdma_sc_vsi * vsi)1848  static void irdma_null_ws_reset(struct irdma_sc_vsi *vsi)
1849  {
1850  	/* do nothing */
1851  }
1852  
1853  /**
1854   * irdma_sc_vsi_init - Init the vsi structure
1855   * @vsi: pointer to vsi structure to initialize
1856   * @info: the info used to initialize the vsi struct
1857   */
irdma_sc_vsi_init(struct irdma_sc_vsi * vsi,struct irdma_vsi_init_info * info)1858  void irdma_sc_vsi_init(struct irdma_sc_vsi  *vsi,
1859  		       struct irdma_vsi_init_info *info)
1860  {
1861  	int i;
1862  
1863  	vsi->dev = info->dev;
1864  	vsi->back_vsi = info->back_vsi;
1865  	vsi->register_qset = info->register_qset;
1866  	vsi->unregister_qset = info->unregister_qset;
1867  	vsi->mtu = info->params->mtu;
1868  	vsi->exception_lan_q = info->exception_lan_q;
1869  	vsi->vsi_idx = info->pf_data_vsi_num;
1870  
1871  	irdma_set_qos_info(vsi, info->params);
1872  	for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
1873  		mutex_init(&vsi->qos[i].qos_mutex);
1874  		INIT_LIST_HEAD(&vsi->qos[i].qplist);
1875  	}
1876  	if (vsi->register_qset) {
1877  		vsi->dev->ws_add = irdma_ws_add;
1878  		vsi->dev->ws_remove = irdma_ws_remove;
1879  		vsi->dev->ws_reset = irdma_ws_reset;
1880  	} else {
1881  		vsi->dev->ws_add = irdma_null_ws_add;
1882  		vsi->dev->ws_remove = irdma_null_ws_remove;
1883  		vsi->dev->ws_reset = irdma_null_ws_reset;
1884  	}
1885  }
1886  
1887  /**
1888   * irdma_get_stats_idx - Return stats index
1889   * @vsi: pointer to the vsi
1890   */
irdma_get_stats_idx(struct irdma_sc_vsi * vsi)1891  static u8 irdma_get_stats_idx(struct irdma_sc_vsi *vsi)
1892  {
1893  	struct irdma_stats_inst_info stats_info = {};
1894  	struct irdma_sc_dev *dev = vsi->dev;
1895  	u8 i;
1896  
1897  	if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1898  		if (!irdma_cqp_stats_inst_cmd(vsi, IRDMA_OP_STATS_ALLOCATE,
1899  					      &stats_info))
1900  			return stats_info.stats_idx;
1901  	}
1902  
1903  	for (i = 0; i < IRDMA_MAX_STATS_COUNT_GEN_1; i++) {
1904  		if (!dev->stats_idx_array[i]) {
1905  			dev->stats_idx_array[i] = true;
1906  			return i;
1907  		}
1908  	}
1909  
1910  	return IRDMA_INVALID_STATS_IDX;
1911  }
1912  
1913  /**
1914   * irdma_hw_stats_init_gen1 - Initialize stat reg table used for gen1
1915   * @vsi: vsi structure where hw_regs are set
1916   *
1917   * Populate the HW stats table
1918   */
irdma_hw_stats_init_gen1(struct irdma_sc_vsi * vsi)1919  static void irdma_hw_stats_init_gen1(struct irdma_sc_vsi *vsi)
1920  {
1921  	struct irdma_sc_dev *dev = vsi->dev;
1922  	const struct irdma_hw_stat_map *map;
1923  	u64 *stat_reg = vsi->hw_stats_regs;
1924  	u64 *regs = dev->hw_stats_regs;
1925  	u16 i, stats_reg_set = vsi->stats_idx;
1926  
1927  	map = dev->hw_stats_map;
1928  
1929  	/* First 4 stat instances are reserved for port level statistics. */
1930  	stats_reg_set += vsi->stats_inst_alloc ? IRDMA_FIRST_NON_PF_STAT : 0;
1931  
1932  	for (i = 0; i < dev->hw_attrs.max_stat_idx; i++) {
1933  		if (map[i].bitmask <= IRDMA_MAX_STATS_32)
1934  			stat_reg[i] = regs[i] + stats_reg_set * sizeof(u32);
1935  		else
1936  			stat_reg[i] = regs[i] + stats_reg_set * sizeof(u64);
1937  	}
1938  }
1939  
1940  /**
1941   * irdma_vsi_stats_init - Initialize the vsi statistics
1942   * @vsi: pointer to the vsi structure
1943   * @info: The info structure used for initialization
1944   */
irdma_vsi_stats_init(struct irdma_sc_vsi * vsi,struct irdma_vsi_stats_info * info)1945  int irdma_vsi_stats_init(struct irdma_sc_vsi *vsi,
1946  			 struct irdma_vsi_stats_info *info)
1947  {
1948  	struct irdma_dma_mem *stats_buff_mem;
1949  
1950  	vsi->pestat = info->pestat;
1951  	vsi->pestat->hw = vsi->dev->hw;
1952  	vsi->pestat->vsi = vsi;
1953  	stats_buff_mem = &vsi->pestat->gather_info.stats_buff_mem;
1954  	stats_buff_mem->size = ALIGN(IRDMA_GATHER_STATS_BUF_SIZE * 2, 1);
1955  	stats_buff_mem->va = dma_alloc_coherent(vsi->pestat->hw->device,
1956  						stats_buff_mem->size,
1957  						&stats_buff_mem->pa,
1958  						GFP_KERNEL);
1959  	if (!stats_buff_mem->va)
1960  		return -ENOMEM;
1961  
1962  	vsi->pestat->gather_info.gather_stats_va = stats_buff_mem->va;
1963  	vsi->pestat->gather_info.last_gather_stats_va =
1964  		(void *)((uintptr_t)stats_buff_mem->va +
1965  			 IRDMA_GATHER_STATS_BUF_SIZE);
1966  
1967  	irdma_hw_stats_start_timer(vsi);
1968  
1969  	/* when stat allocation is not required default to fcn_id. */
1970  	vsi->stats_idx = info->fcn_id;
1971  	if (info->alloc_stats_inst) {
1972  		u8 stats_idx = irdma_get_stats_idx(vsi);
1973  
1974  		if (stats_idx != IRDMA_INVALID_STATS_IDX) {
1975  			vsi->stats_inst_alloc = true;
1976  			vsi->stats_idx = stats_idx;
1977  			vsi->pestat->gather_info.use_stats_inst = true;
1978  			vsi->pestat->gather_info.stats_inst_index = stats_idx;
1979  		}
1980  	}
1981  
1982  	if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
1983  		irdma_hw_stats_init_gen1(vsi);
1984  
1985  	return 0;
1986  }
1987  
1988  /**
1989   * irdma_vsi_stats_free - Free the vsi stats
1990   * @vsi: pointer to the vsi structure
1991   */
irdma_vsi_stats_free(struct irdma_sc_vsi * vsi)1992  void irdma_vsi_stats_free(struct irdma_sc_vsi *vsi)
1993  {
1994  	struct irdma_stats_inst_info stats_info = {};
1995  	struct irdma_sc_dev *dev = vsi->dev;
1996  	u8 stats_idx = vsi->stats_idx;
1997  
1998  	if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1999  		if (vsi->stats_inst_alloc) {
2000  			stats_info.stats_idx = vsi->stats_idx;
2001  			irdma_cqp_stats_inst_cmd(vsi, IRDMA_OP_STATS_FREE,
2002  						 &stats_info);
2003  		}
2004  	} else {
2005  		if (vsi->stats_inst_alloc &&
2006  		    stats_idx < vsi->dev->hw_attrs.max_stat_inst)
2007  			vsi->dev->stats_idx_array[stats_idx] = false;
2008  	}
2009  
2010  	if (!vsi->pestat)
2011  		return;
2012  	irdma_hw_stats_stop_timer(vsi);
2013  	dma_free_coherent(vsi->pestat->hw->device,
2014  			  vsi->pestat->gather_info.stats_buff_mem.size,
2015  			  vsi->pestat->gather_info.stats_buff_mem.va,
2016  			  vsi->pestat->gather_info.stats_buff_mem.pa);
2017  	vsi->pestat->gather_info.stats_buff_mem.va = NULL;
2018  }
2019  
2020  /**
2021   * irdma_get_encoded_wqe_size - given wq size, returns hardware encoded size
2022   * @wqsize: size of the wq (sq, rq) to encoded_size
2023   * @queue_type: queue type selected for the calculation algorithm
2024   */
irdma_get_encoded_wqe_size(u32 wqsize,enum irdma_queue_type queue_type)2025  u8 irdma_get_encoded_wqe_size(u32 wqsize, enum irdma_queue_type queue_type)
2026  {
2027  	u8 encoded_size = 0;
2028  
2029  	/* cqp sq's hw coded value starts from 1 for size of 4
2030  	 * while it starts from 0 for qp' wq's.
2031  	 */
2032  	if (queue_type == IRDMA_QUEUE_TYPE_CQP)
2033  		encoded_size = 1;
2034  	wqsize >>= 2;
2035  	while (wqsize >>= 1)
2036  		encoded_size++;
2037  
2038  	return encoded_size;
2039  }
2040  
2041  /**
2042   * irdma_sc_gather_stats - collect the statistics
2043   * @cqp: struct for cqp hw
2044   * @info: gather stats info structure
2045   * @scratch: u64 saved to be used during cqp completion
2046   */
irdma_sc_gather_stats(struct irdma_sc_cqp * cqp,struct irdma_stats_gather_info * info,u64 scratch)2047  static int irdma_sc_gather_stats(struct irdma_sc_cqp *cqp,
2048  				 struct irdma_stats_gather_info *info,
2049  				 u64 scratch)
2050  {
2051  	__le64 *wqe;
2052  	u64 temp;
2053  
2054  	if (info->stats_buff_mem.size < IRDMA_GATHER_STATS_BUF_SIZE)
2055  		return -ENOMEM;
2056  
2057  	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2058  	if (!wqe)
2059  		return -ENOMEM;
2060  
2061  	set_64bit_val(wqe, 40,
2062  		      FIELD_PREP(IRDMA_CQPSQ_STATS_HMC_FCN_INDEX, info->hmc_fcn_index));
2063  	set_64bit_val(wqe, 32, info->stats_buff_mem.pa);
2064  
2065  	temp = FIELD_PREP(IRDMA_CQPSQ_STATS_WQEVALID, cqp->polarity) |
2066  	       FIELD_PREP(IRDMA_CQPSQ_STATS_USE_INST, info->use_stats_inst) |
2067  	       FIELD_PREP(IRDMA_CQPSQ_STATS_INST_INDEX,
2068  			  info->stats_inst_index) |
2069  	       FIELD_PREP(IRDMA_CQPSQ_STATS_USE_HMC_FCN_INDEX,
2070  			  info->use_hmc_fcn_index) |
2071  	       FIELD_PREP(IRDMA_CQPSQ_STATS_OP, IRDMA_CQP_OP_GATHER_STATS);
2072  	dma_wmb(); /* make sure WQE is written before valid bit is set */
2073  
2074  	set_64bit_val(wqe, 24, temp);
2075  
2076  	print_hex_dump_debug("STATS: GATHER_STATS WQE", DUMP_PREFIX_OFFSET,
2077  			     16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
2078  
2079  	irdma_sc_cqp_post_sq(cqp);
2080  	ibdev_dbg(to_ibdev(cqp->dev),
2081  		  "STATS: CQP SQ head 0x%x tail 0x%x size 0x%x\n",
2082  		  cqp->sq_ring.head, cqp->sq_ring.tail, cqp->sq_ring.size);
2083  
2084  	return 0;
2085  }
2086  
2087  /**
2088   * irdma_sc_manage_stats_inst - allocate or free stats instance
2089   * @cqp: struct for cqp hw
2090   * @info: stats info structure
2091   * @alloc: alloc vs. delete flag
2092   * @scratch: u64 saved to be used during cqp completion
2093   */
irdma_sc_manage_stats_inst(struct irdma_sc_cqp * cqp,struct irdma_stats_inst_info * info,bool alloc,u64 scratch)2094  static int irdma_sc_manage_stats_inst(struct irdma_sc_cqp *cqp,
2095  				      struct irdma_stats_inst_info *info,
2096  				      bool alloc, u64 scratch)
2097  {
2098  	__le64 *wqe;
2099  	u64 temp;
2100  
2101  	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2102  	if (!wqe)
2103  		return -ENOMEM;
2104  
2105  	set_64bit_val(wqe, 40,
2106  		      FIELD_PREP(IRDMA_CQPSQ_STATS_HMC_FCN_INDEX, info->hmc_fn_id));
2107  	temp = FIELD_PREP(IRDMA_CQPSQ_STATS_WQEVALID, cqp->polarity) |
2108  	       FIELD_PREP(IRDMA_CQPSQ_STATS_ALLOC_INST, alloc) |
2109  	       FIELD_PREP(IRDMA_CQPSQ_STATS_USE_HMC_FCN_INDEX,
2110  			  info->use_hmc_fcn_index) |
2111  	       FIELD_PREP(IRDMA_CQPSQ_STATS_INST_INDEX, info->stats_idx) |
2112  	       FIELD_PREP(IRDMA_CQPSQ_STATS_OP, IRDMA_CQP_OP_MANAGE_STATS);
2113  
2114  	dma_wmb(); /* make sure WQE is written before valid bit is set */
2115  
2116  	set_64bit_val(wqe, 24, temp);
2117  
2118  	print_hex_dump_debug("WQE: MANAGE_STATS WQE", DUMP_PREFIX_OFFSET, 16,
2119  			     8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
2120  
2121  	irdma_sc_cqp_post_sq(cqp);
2122  	return 0;
2123  }
2124  
2125  /**
2126   * irdma_sc_set_up_map - set the up map table
2127   * @cqp: struct for cqp hw
2128   * @info: User priority map info
2129   * @scratch: u64 saved to be used during cqp completion
2130   */
irdma_sc_set_up_map(struct irdma_sc_cqp * cqp,struct irdma_up_info * info,u64 scratch)2131  static int irdma_sc_set_up_map(struct irdma_sc_cqp *cqp,
2132  			       struct irdma_up_info *info, u64 scratch)
2133  {
2134  	__le64 *wqe;
2135  	u64 temp = 0;
2136  	int i;
2137  
2138  	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2139  	if (!wqe)
2140  		return -ENOMEM;
2141  
2142  	for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++)
2143  		temp |= (u64)info->map[i] << (i * 8);
2144  
2145  	set_64bit_val(wqe, 0, temp);
2146  	set_64bit_val(wqe, 40,
2147  		      FIELD_PREP(IRDMA_CQPSQ_UP_CNPOVERRIDE, info->cnp_up_override) |
2148  		      FIELD_PREP(IRDMA_CQPSQ_UP_HMCFCNIDX, info->hmc_fcn_idx));
2149  
2150  	temp = FIELD_PREP(IRDMA_CQPSQ_UP_WQEVALID, cqp->polarity) |
2151  	       FIELD_PREP(IRDMA_CQPSQ_UP_USEVLAN, info->use_vlan) |
2152  	       FIELD_PREP(IRDMA_CQPSQ_UP_USEOVERRIDE,
2153  			  info->use_cnp_up_override) |
2154  	       FIELD_PREP(IRDMA_CQPSQ_UP_OP, IRDMA_CQP_OP_UP_MAP);
2155  	dma_wmb(); /* make sure WQE is written before valid bit is set */
2156  
2157  	set_64bit_val(wqe, 24, temp);
2158  
2159  	print_hex_dump_debug("WQE: UPMAP WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe,
2160  			     IRDMA_CQP_WQE_SIZE * 8, false);
2161  	irdma_sc_cqp_post_sq(cqp);
2162  
2163  	return 0;
2164  }
2165  
2166  /**
2167   * irdma_sc_manage_ws_node - create/modify/destroy WS node
2168   * @cqp: struct for cqp hw
2169   * @info: node info structure
2170   * @node_op: 0 for add 1 for modify, 2 for delete
2171   * @scratch: u64 saved to be used during cqp completion
2172   */
irdma_sc_manage_ws_node(struct irdma_sc_cqp * cqp,struct irdma_ws_node_info * info,enum irdma_ws_node_op node_op,u64 scratch)2173  static int irdma_sc_manage_ws_node(struct irdma_sc_cqp *cqp,
2174  				   struct irdma_ws_node_info *info,
2175  				   enum irdma_ws_node_op node_op, u64 scratch)
2176  {
2177  	__le64 *wqe;
2178  	u64 temp = 0;
2179  
2180  	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2181  	if (!wqe)
2182  		return -ENOMEM;
2183  
2184  	set_64bit_val(wqe, 32,
2185  		      FIELD_PREP(IRDMA_CQPSQ_WS_VSI, info->vsi) |
2186  		      FIELD_PREP(IRDMA_CQPSQ_WS_WEIGHT, info->weight));
2187  
2188  	temp = FIELD_PREP(IRDMA_CQPSQ_WS_WQEVALID, cqp->polarity) |
2189  	       FIELD_PREP(IRDMA_CQPSQ_WS_NODEOP, node_op) |
2190  	       FIELD_PREP(IRDMA_CQPSQ_WS_ENABLENODE, info->enable) |
2191  	       FIELD_PREP(IRDMA_CQPSQ_WS_NODETYPE, info->type_leaf) |
2192  	       FIELD_PREP(IRDMA_CQPSQ_WS_PRIOTYPE, info->prio_type) |
2193  	       FIELD_PREP(IRDMA_CQPSQ_WS_TC, info->tc) |
2194  	       FIELD_PREP(IRDMA_CQPSQ_WS_OP, IRDMA_CQP_OP_WORK_SCHED_NODE) |
2195  	       FIELD_PREP(IRDMA_CQPSQ_WS_PARENTID, info->parent_id) |
2196  	       FIELD_PREP(IRDMA_CQPSQ_WS_NODEID, info->id);
2197  	dma_wmb(); /* make sure WQE is written before valid bit is set */
2198  
2199  	set_64bit_val(wqe, 24, temp);
2200  
2201  	print_hex_dump_debug("WQE: MANAGE_WS WQE", DUMP_PREFIX_OFFSET, 16, 8,
2202  			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
2203  	irdma_sc_cqp_post_sq(cqp);
2204  
2205  	return 0;
2206  }
2207  
2208  /**
2209   * irdma_sc_qp_flush_wqes - flush qp's wqe
2210   * @qp: sc qp
2211   * @info: dlush information
2212   * @scratch: u64 saved to be used during cqp completion
2213   * @post_sq: flag for cqp db to ring
2214   */
irdma_sc_qp_flush_wqes(struct irdma_sc_qp * qp,struct irdma_qp_flush_info * info,u64 scratch,bool post_sq)2215  int irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
2216  			   struct irdma_qp_flush_info *info, u64 scratch,
2217  			   bool post_sq)
2218  {
2219  	u64 temp = 0;
2220  	__le64 *wqe;
2221  	struct irdma_sc_cqp *cqp;
2222  	u64 hdr;
2223  	bool flush_sq = false, flush_rq = false;
2224  
2225  	if (info->rq && !qp->flush_rq)
2226  		flush_rq = true;
2227  	if (info->sq && !qp->flush_sq)
2228  		flush_sq = true;
2229  	qp->flush_sq |= flush_sq;
2230  	qp->flush_rq |= flush_rq;
2231  
2232  	if (!flush_sq && !flush_rq) {
2233  		ibdev_dbg(to_ibdev(qp->dev),
2234  			  "CQP: Additional flush request ignored for qp %x\n",
2235  			  qp->qp_uk.qp_id);
2236  		return -EALREADY;
2237  	}
2238  
2239  	cqp = qp->pd->dev->cqp;
2240  	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2241  	if (!wqe)
2242  		return -ENOMEM;
2243  
2244  	if (info->userflushcode) {
2245  		if (flush_rq)
2246  			temp |= FIELD_PREP(IRDMA_CQPSQ_FWQE_RQMNERR,
2247  					   info->rq_minor_code) |
2248  				FIELD_PREP(IRDMA_CQPSQ_FWQE_RQMJERR,
2249  					   info->rq_major_code);
2250  		if (flush_sq)
2251  			temp |= FIELD_PREP(IRDMA_CQPSQ_FWQE_SQMNERR,
2252  					   info->sq_minor_code) |
2253  				FIELD_PREP(IRDMA_CQPSQ_FWQE_SQMJERR,
2254  					   info->sq_major_code);
2255  	}
2256  	set_64bit_val(wqe, 16, temp);
2257  
2258  	temp = (info->generate_ae) ?
2259  		info->ae_code | FIELD_PREP(IRDMA_CQPSQ_FWQE_AESOURCE,
2260  					   info->ae_src) : 0;
2261  	set_64bit_val(wqe, 8, temp);
2262  
2263  	hdr = qp->qp_uk.qp_id |
2264  	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_FLUSH_WQES) |
2265  	      FIELD_PREP(IRDMA_CQPSQ_FWQE_GENERATE_AE, info->generate_ae) |
2266  	      FIELD_PREP(IRDMA_CQPSQ_FWQE_USERFLCODE, info->userflushcode) |
2267  	      FIELD_PREP(IRDMA_CQPSQ_FWQE_FLUSHSQ, flush_sq) |
2268  	      FIELD_PREP(IRDMA_CQPSQ_FWQE_FLUSHRQ, flush_rq) |
2269  	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
2270  	dma_wmb(); /* make sure WQE is written before valid bit is set */
2271  
2272  	set_64bit_val(wqe, 24, hdr);
2273  
2274  	print_hex_dump_debug("WQE: QP_FLUSH WQE", DUMP_PREFIX_OFFSET, 16, 8,
2275  			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
2276  	if (post_sq)
2277  		irdma_sc_cqp_post_sq(cqp);
2278  
2279  	return 0;
2280  }
2281  
2282  /**
2283   * irdma_sc_gen_ae - generate AE, uses flush WQE CQP OP
2284   * @qp: sc qp
2285   * @info: gen ae information
2286   * @scratch: u64 saved to be used during cqp completion
2287   * @post_sq: flag for cqp db to ring
2288   */
irdma_sc_gen_ae(struct irdma_sc_qp * qp,struct irdma_gen_ae_info * info,u64 scratch,bool post_sq)2289  static int irdma_sc_gen_ae(struct irdma_sc_qp *qp,
2290  			   struct irdma_gen_ae_info *info, u64 scratch,
2291  			   bool post_sq)
2292  {
2293  	u64 temp;
2294  	__le64 *wqe;
2295  	struct irdma_sc_cqp *cqp;
2296  	u64 hdr;
2297  
2298  	cqp = qp->pd->dev->cqp;
2299  	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2300  	if (!wqe)
2301  		return -ENOMEM;
2302  
2303  	temp = info->ae_code | FIELD_PREP(IRDMA_CQPSQ_FWQE_AESOURCE,
2304  					  info->ae_src);
2305  	set_64bit_val(wqe, 8, temp);
2306  
2307  	hdr = qp->qp_uk.qp_id | FIELD_PREP(IRDMA_CQPSQ_OPCODE,
2308  					   IRDMA_CQP_OP_GEN_AE) |
2309  	      FIELD_PREP(IRDMA_CQPSQ_FWQE_GENERATE_AE, 1) |
2310  	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
2311  	dma_wmb(); /* make sure WQE is written before valid bit is set */
2312  
2313  	set_64bit_val(wqe, 24, hdr);
2314  
2315  	print_hex_dump_debug("WQE: GEN_AE WQE", DUMP_PREFIX_OFFSET, 16, 8,
2316  			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
2317  	if (post_sq)
2318  		irdma_sc_cqp_post_sq(cqp);
2319  
2320  	return 0;
2321  }
2322  
2323  /*** irdma_sc_qp_upload_context - upload qp's context
2324   * @dev: sc device struct
2325   * @info: upload context info ptr for return
2326   * @scratch: u64 saved to be used during cqp completion
2327   * @post_sq: flag for cqp db to ring
2328   */
irdma_sc_qp_upload_context(struct irdma_sc_dev * dev,struct irdma_upload_context_info * info,u64 scratch,bool post_sq)2329  static int irdma_sc_qp_upload_context(struct irdma_sc_dev *dev,
2330  				      struct irdma_upload_context_info *info,
2331  				      u64 scratch, bool post_sq)
2332  {
2333  	__le64 *wqe;
2334  	struct irdma_sc_cqp *cqp;
2335  	u64 hdr;
2336  
2337  	cqp = dev->cqp;
2338  	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2339  	if (!wqe)
2340  		return -ENOMEM;
2341  
2342  	set_64bit_val(wqe, 16, info->buf_pa);
2343  
2344  	hdr = FIELD_PREP(IRDMA_CQPSQ_UCTX_QPID, info->qp_id) |
2345  	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_UPLOAD_CONTEXT) |
2346  	      FIELD_PREP(IRDMA_CQPSQ_UCTX_QPTYPE, info->qp_type) |
2347  	      FIELD_PREP(IRDMA_CQPSQ_UCTX_RAWFORMAT, info->raw_format) |
2348  	      FIELD_PREP(IRDMA_CQPSQ_UCTX_FREEZEQP, info->freeze_qp) |
2349  	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
2350  	dma_wmb(); /* make sure WQE is written before valid bit is set */
2351  
2352  	set_64bit_val(wqe, 24, hdr);
2353  
2354  	print_hex_dump_debug("WQE: QP_UPLOAD_CTX WQE", DUMP_PREFIX_OFFSET, 16,
2355  			     8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
2356  	if (post_sq)
2357  		irdma_sc_cqp_post_sq(cqp);
2358  
2359  	return 0;
2360  }
2361  
2362  /**
2363   * irdma_sc_manage_push_page - Handle push page
2364   * @cqp: struct for cqp hw
2365   * @info: push page info
2366   * @scratch: u64 saved to be used during cqp completion
2367   * @post_sq: flag for cqp db to ring
2368   */
irdma_sc_manage_push_page(struct irdma_sc_cqp * cqp,struct irdma_cqp_manage_push_page_info * info,u64 scratch,bool post_sq)2369  static int irdma_sc_manage_push_page(struct irdma_sc_cqp *cqp,
2370  				     struct irdma_cqp_manage_push_page_info *info,
2371  				     u64 scratch, bool post_sq)
2372  {
2373  	__le64 *wqe;
2374  	u64 hdr;
2375  
2376  	if (info->free_page &&
2377  	    info->push_idx >= cqp->dev->hw_attrs.max_hw_device_pages)
2378  		return -EINVAL;
2379  
2380  	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2381  	if (!wqe)
2382  		return -ENOMEM;
2383  
2384  	set_64bit_val(wqe, 16, info->qs_handle);
2385  	hdr = FIELD_PREP(IRDMA_CQPSQ_MPP_PPIDX, info->push_idx) |
2386  	      FIELD_PREP(IRDMA_CQPSQ_MPP_PPTYPE, info->push_page_type) |
2387  	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_PUSH_PAGES) |
2388  	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity) |
2389  	      FIELD_PREP(IRDMA_CQPSQ_MPP_FREE_PAGE, info->free_page);
2390  	dma_wmb(); /* make sure WQE is written before valid bit is set */
2391  
2392  	set_64bit_val(wqe, 24, hdr);
2393  
2394  	print_hex_dump_debug("WQE: MANAGE_PUSH_PAGES WQE", DUMP_PREFIX_OFFSET,
2395  			     16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
2396  	if (post_sq)
2397  		irdma_sc_cqp_post_sq(cqp);
2398  
2399  	return 0;
2400  }
2401  
2402  /**
2403   * irdma_sc_suspend_qp - suspend qp for param change
2404   * @cqp: struct for cqp hw
2405   * @qp: sc qp struct
2406   * @scratch: u64 saved to be used during cqp completion
2407   */
irdma_sc_suspend_qp(struct irdma_sc_cqp * cqp,struct irdma_sc_qp * qp,u64 scratch)2408  static int irdma_sc_suspend_qp(struct irdma_sc_cqp *cqp, struct irdma_sc_qp *qp,
2409  			       u64 scratch)
2410  {
2411  	u64 hdr;
2412  	__le64 *wqe;
2413  
2414  	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2415  	if (!wqe)
2416  		return -ENOMEM;
2417  
2418  	hdr = FIELD_PREP(IRDMA_CQPSQ_SUSPENDQP_QPID, qp->qp_uk.qp_id) |
2419  	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_SUSPEND_QP) |
2420  	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
2421  	dma_wmb(); /* make sure WQE is written before valid bit is set */
2422  
2423  	set_64bit_val(wqe, 24, hdr);
2424  
2425  	print_hex_dump_debug("WQE: SUSPEND_QP WQE", DUMP_PREFIX_OFFSET, 16, 8,
2426  			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
2427  	irdma_sc_cqp_post_sq(cqp);
2428  
2429  	return 0;
2430  }
2431  
2432  /**
2433   * irdma_sc_resume_qp - resume qp after suspend
2434   * @cqp: struct for cqp hw
2435   * @qp: sc qp struct
2436   * @scratch: u64 saved to be used during cqp completion
2437   */
irdma_sc_resume_qp(struct irdma_sc_cqp * cqp,struct irdma_sc_qp * qp,u64 scratch)2438  static int irdma_sc_resume_qp(struct irdma_sc_cqp *cqp, struct irdma_sc_qp *qp,
2439  			      u64 scratch)
2440  {
2441  	u64 hdr;
2442  	__le64 *wqe;
2443  
2444  	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2445  	if (!wqe)
2446  		return -ENOMEM;
2447  
2448  	set_64bit_val(wqe, 16,
2449  		      FIELD_PREP(IRDMA_CQPSQ_RESUMEQP_QSHANDLE, qp->qs_handle));
2450  
2451  	hdr = FIELD_PREP(IRDMA_CQPSQ_RESUMEQP_QPID, qp->qp_uk.qp_id) |
2452  	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_RESUME_QP) |
2453  	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
2454  	dma_wmb(); /* make sure WQE is written before valid bit is set */
2455  
2456  	set_64bit_val(wqe, 24, hdr);
2457  
2458  	print_hex_dump_debug("WQE: RESUME_QP WQE", DUMP_PREFIX_OFFSET, 16, 8,
2459  			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
2460  	irdma_sc_cqp_post_sq(cqp);
2461  
2462  	return 0;
2463  }
2464  
2465  /**
2466   * irdma_sc_cq_ack - acknowledge completion q
2467   * @cq: cq struct
2468   */
irdma_sc_cq_ack(struct irdma_sc_cq * cq)2469  static inline void irdma_sc_cq_ack(struct irdma_sc_cq *cq)
2470  {
2471  	writel(cq->cq_uk.cq_id, cq->cq_uk.cq_ack_db);
2472  }
2473  
2474  /**
2475   * irdma_sc_cq_init - initialize completion q
2476   * @cq: cq struct
2477   * @info: cq initialization info
2478   */
irdma_sc_cq_init(struct irdma_sc_cq * cq,struct irdma_cq_init_info * info)2479  int irdma_sc_cq_init(struct irdma_sc_cq *cq, struct irdma_cq_init_info *info)
2480  {
2481  	u32 pble_obj_cnt;
2482  
2483  	pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
2484  	if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
2485  		return -EINVAL;
2486  
2487  	cq->cq_pa = info->cq_base_pa;
2488  	cq->dev = info->dev;
2489  	cq->ceq_id = info->ceq_id;
2490  	info->cq_uk_init_info.cqe_alloc_db = cq->dev->cq_arm_db;
2491  	info->cq_uk_init_info.cq_ack_db = cq->dev->cq_ack_db;
2492  	irdma_uk_cq_init(&cq->cq_uk, &info->cq_uk_init_info);
2493  
2494  	cq->virtual_map = info->virtual_map;
2495  	cq->pbl_chunk_size = info->pbl_chunk_size;
2496  	cq->ceqe_mask = info->ceqe_mask;
2497  	cq->cq_type = (info->type) ? info->type : IRDMA_CQ_TYPE_IWARP;
2498  	cq->shadow_area_pa = info->shadow_area_pa;
2499  	cq->shadow_read_threshold = info->shadow_read_threshold;
2500  	cq->ceq_id_valid = info->ceq_id_valid;
2501  	cq->tph_en = info->tph_en;
2502  	cq->tph_val = info->tph_val;
2503  	cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
2504  	cq->vsi = info->vsi;
2505  
2506  	return 0;
2507  }
2508  
2509  /**
2510   * irdma_sc_cq_create - create completion q
2511   * @cq: cq struct
2512   * @scratch: u64 saved to be used during cqp completion
2513   * @check_overflow: flag for overflow check
2514   * @post_sq: flag for cqp db to ring
2515   */
irdma_sc_cq_create(struct irdma_sc_cq * cq,u64 scratch,bool check_overflow,bool post_sq)2516  static int irdma_sc_cq_create(struct irdma_sc_cq *cq, u64 scratch,
2517  			      bool check_overflow, bool post_sq)
2518  {
2519  	__le64 *wqe;
2520  	struct irdma_sc_cqp *cqp;
2521  	u64 hdr;
2522  	struct irdma_sc_ceq *ceq;
2523  	int ret_code = 0;
2524  
2525  	cqp = cq->dev->cqp;
2526  	if (cq->cq_uk.cq_id >= cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt)
2527  		return -EINVAL;
2528  
2529  	if (cq->ceq_id >= cq->dev->hmc_fpm_misc.max_ceqs)
2530  		return -EINVAL;
2531  
2532  	ceq = cq->dev->ceq[cq->ceq_id];
2533  	if (ceq && ceq->reg_cq)
2534  		ret_code = irdma_sc_add_cq_ctx(ceq, cq);
2535  
2536  	if (ret_code)
2537  		return ret_code;
2538  
2539  	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2540  	if (!wqe) {
2541  		if (ceq && ceq->reg_cq)
2542  			irdma_sc_remove_cq_ctx(ceq, cq);
2543  		return -ENOMEM;
2544  	}
2545  
2546  	set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
2547  	set_64bit_val(wqe, 8, (uintptr_t)cq >> 1);
2548  	set_64bit_val(wqe, 16,
2549  		      FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD, cq->shadow_read_threshold));
2550  	set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa));
2551  	set_64bit_val(wqe, 40, cq->shadow_area_pa);
2552  	set_64bit_val(wqe, 48,
2553  		      FIELD_PREP(IRDMA_CQPSQ_CQ_FIRSTPMPBLIDX, (cq->virtual_map ? cq->first_pm_pbl_idx : 0)));
2554  	set_64bit_val(wqe, 56,
2555  		      FIELD_PREP(IRDMA_CQPSQ_TPHVAL, cq->tph_val) |
2556  		      FIELD_PREP(IRDMA_CQPSQ_VSIIDX, cq->vsi->vsi_idx));
2557  
2558  	hdr = FLD_LS_64(cq->dev, cq->cq_uk.cq_id, IRDMA_CQPSQ_CQ_CQID) |
2559  	      FLD_LS_64(cq->dev, (cq->ceq_id_valid ? cq->ceq_id : 0),
2560  			IRDMA_CQPSQ_CQ_CEQID) |
2561  	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CQ) |
2562  	      FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, cq->pbl_chunk_size) |
2563  	      FIELD_PREP(IRDMA_CQPSQ_CQ_CHKOVERFLOW, check_overflow) |
2564  	      FIELD_PREP(IRDMA_CQPSQ_CQ_VIRTMAP, cq->virtual_map) |
2565  	      FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) |
2566  	      FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, cq->ceq_id_valid) |
2567  	      FIELD_PREP(IRDMA_CQPSQ_TPHEN, cq->tph_en) |
2568  	      FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT,
2569  			 cq->cq_uk.avoid_mem_cflct) |
2570  	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
2571  	dma_wmb(); /* make sure WQE is written before valid bit is set */
2572  
2573  	set_64bit_val(wqe, 24, hdr);
2574  
2575  	print_hex_dump_debug("WQE: CQ_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8,
2576  			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
2577  	if (post_sq)
2578  		irdma_sc_cqp_post_sq(cqp);
2579  
2580  	return 0;
2581  }
2582  
2583  /**
2584   * irdma_sc_cq_destroy - destroy completion q
2585   * @cq: cq struct
2586   * @scratch: u64 saved to be used during cqp completion
2587   * @post_sq: flag for cqp db to ring
2588   */
irdma_sc_cq_destroy(struct irdma_sc_cq * cq,u64 scratch,bool post_sq)2589  int irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch, bool post_sq)
2590  {
2591  	struct irdma_sc_cqp *cqp;
2592  	__le64 *wqe;
2593  	u64 hdr;
2594  	struct irdma_sc_ceq *ceq;
2595  
2596  	cqp = cq->dev->cqp;
2597  	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2598  	if (!wqe)
2599  		return -ENOMEM;
2600  
2601  	ceq = cq->dev->ceq[cq->ceq_id];
2602  	if (ceq && ceq->reg_cq)
2603  		irdma_sc_remove_cq_ctx(ceq, cq);
2604  
2605  	set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
2606  	set_64bit_val(wqe, 8, (uintptr_t)cq >> 1);
2607  	set_64bit_val(wqe, 40, cq->shadow_area_pa);
2608  	set_64bit_val(wqe, 48,
2609  		      (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
2610  
2611  	hdr = cq->cq_uk.cq_id |
2612  	      FLD_LS_64(cq->dev, (cq->ceq_id_valid ? cq->ceq_id : 0),
2613  			IRDMA_CQPSQ_CQ_CEQID) |
2614  	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_CQ) |
2615  	      FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, cq->pbl_chunk_size) |
2616  	      FIELD_PREP(IRDMA_CQPSQ_CQ_VIRTMAP, cq->virtual_map) |
2617  	      FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) |
2618  	      FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, cq->ceq_id_valid) |
2619  	      FIELD_PREP(IRDMA_CQPSQ_TPHEN, cq->tph_en) |
2620  	      FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT, cq->cq_uk.avoid_mem_cflct) |
2621  	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
2622  	dma_wmb(); /* make sure WQE is written before valid bit is set */
2623  
2624  	set_64bit_val(wqe, 24, hdr);
2625  
2626  	print_hex_dump_debug("WQE: CQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16, 8,
2627  			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
2628  	if (post_sq)
2629  		irdma_sc_cqp_post_sq(cqp);
2630  
2631  	return 0;
2632  }
2633  
2634  /**
2635   * irdma_sc_cq_resize - set resized cq buffer info
2636   * @cq: resized cq
2637   * @info: resized cq buffer info
2638   */
irdma_sc_cq_resize(struct irdma_sc_cq * cq,struct irdma_modify_cq_info * info)2639  void irdma_sc_cq_resize(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info)
2640  {
2641  	cq->virtual_map = info->virtual_map;
2642  	cq->cq_pa = info->cq_pa;
2643  	cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
2644  	cq->pbl_chunk_size = info->pbl_chunk_size;
2645  	irdma_uk_cq_resize(&cq->cq_uk, info->cq_base, info->cq_size);
2646  }
2647  
2648  /**
2649   * irdma_sc_cq_modify - modify a Completion Queue
2650   * @cq: cq struct
2651   * @info: modification info struct
2652   * @scratch: u64 saved to be used during cqp completion
2653   * @post_sq: flag to post to sq
2654   */
irdma_sc_cq_modify(struct irdma_sc_cq * cq,struct irdma_modify_cq_info * info,u64 scratch,bool post_sq)2655  static int irdma_sc_cq_modify(struct irdma_sc_cq *cq,
2656  			      struct irdma_modify_cq_info *info, u64 scratch,
2657  			      bool post_sq)
2658  {
2659  	struct irdma_sc_cqp *cqp;
2660  	__le64 *wqe;
2661  	u64 hdr;
2662  	u32 pble_obj_cnt;
2663  
2664  	pble_obj_cnt = cq->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
2665  	if (info->cq_resize && info->virtual_map &&
2666  	    info->first_pm_pbl_idx >= pble_obj_cnt)
2667  		return -EINVAL;
2668  
2669  	cqp = cq->dev->cqp;
2670  	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2671  	if (!wqe)
2672  		return -ENOMEM;
2673  
2674  	set_64bit_val(wqe, 0, info->cq_size);
2675  	set_64bit_val(wqe, 8, (uintptr_t)cq >> 1);
2676  	set_64bit_val(wqe, 16,
2677  		      FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD, info->shadow_read_threshold));
2678  	set_64bit_val(wqe, 32, info->cq_pa);
2679  	set_64bit_val(wqe, 40, cq->shadow_area_pa);
2680  	set_64bit_val(wqe, 48, info->first_pm_pbl_idx);
2681  	set_64bit_val(wqe, 56,
2682  		      FIELD_PREP(IRDMA_CQPSQ_TPHVAL, cq->tph_val) |
2683  		      FIELD_PREP(IRDMA_CQPSQ_VSIIDX, cq->vsi->vsi_idx));
2684  
2685  	hdr = cq->cq_uk.cq_id |
2686  	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MODIFY_CQ) |
2687  	      FIELD_PREP(IRDMA_CQPSQ_CQ_CQRESIZE, info->cq_resize) |
2688  	      FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, info->pbl_chunk_size) |
2689  	      FIELD_PREP(IRDMA_CQPSQ_CQ_CHKOVERFLOW, info->check_overflow) |
2690  	      FIELD_PREP(IRDMA_CQPSQ_CQ_VIRTMAP, info->virtual_map) |
2691  	      FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) |
2692  	      FIELD_PREP(IRDMA_CQPSQ_TPHEN, cq->tph_en) |
2693  	      FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT,
2694  			 cq->cq_uk.avoid_mem_cflct) |
2695  	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
2696  	dma_wmb(); /* make sure WQE is written before valid bit is set */
2697  
2698  	set_64bit_val(wqe, 24, hdr);
2699  
2700  	print_hex_dump_debug("WQE: CQ_MODIFY WQE", DUMP_PREFIX_OFFSET, 16, 8,
2701  			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
2702  	if (post_sq)
2703  		irdma_sc_cqp_post_sq(cqp);
2704  
2705  	return 0;
2706  }
2707  
2708  /**
2709   * irdma_check_cqp_progress - check cqp processing progress
2710   * @timeout: timeout info struct
2711   * @dev: sc device struct
2712   */
irdma_check_cqp_progress(struct irdma_cqp_timeout * timeout,struct irdma_sc_dev * dev)2713  void irdma_check_cqp_progress(struct irdma_cqp_timeout *timeout, struct irdma_sc_dev *dev)
2714  {
2715  	u64 completed_ops = atomic64_read(&dev->cqp->completed_ops);
2716  
2717  	if (timeout->compl_cqp_cmds != completed_ops) {
2718  		timeout->compl_cqp_cmds = completed_ops;
2719  		timeout->count = 0;
2720  	} else if (timeout->compl_cqp_cmds != dev->cqp->requested_ops) {
2721  		timeout->count++;
2722  	}
2723  }
2724  
2725  /**
2726   * irdma_get_cqp_reg_info - get head and tail for cqp using registers
2727   * @cqp: struct for cqp hw
2728   * @val: cqp tail register value
2729   * @tail: wqtail register value
2730   * @error: cqp processing err
2731   */
irdma_get_cqp_reg_info(struct irdma_sc_cqp * cqp,u32 * val,u32 * tail,u32 * error)2732  static inline void irdma_get_cqp_reg_info(struct irdma_sc_cqp *cqp, u32 *val,
2733  					  u32 *tail, u32 *error)
2734  {
2735  	*val = readl(cqp->dev->hw_regs[IRDMA_CQPTAIL]);
2736  	*tail = FIELD_GET(IRDMA_CQPTAIL_WQTAIL, *val);
2737  	*error = FIELD_GET(IRDMA_CQPTAIL_CQP_OP_ERR, *val);
2738  }
2739  
2740  /**
2741   * irdma_cqp_poll_registers - poll cqp registers
2742   * @cqp: struct for cqp hw
2743   * @tail: wqtail register value
2744   * @count: how many times to try for completion
2745   */
irdma_cqp_poll_registers(struct irdma_sc_cqp * cqp,u32 tail,u32 count)2746  static int irdma_cqp_poll_registers(struct irdma_sc_cqp *cqp, u32 tail,
2747  				    u32 count)
2748  {
2749  	u32 i = 0;
2750  	u32 newtail, error, val;
2751  
2752  	while (i++ < count) {
2753  		irdma_get_cqp_reg_info(cqp, &val, &newtail, &error);
2754  		if (error) {
2755  			error = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]);
2756  			ibdev_dbg(to_ibdev(cqp->dev),
2757  				  "CQP: CQPERRCODES error_code[x%08X]\n",
2758  				  error);
2759  			return -EIO;
2760  		}
2761  		if (newtail != tail) {
2762  			/* SUCCESS */
2763  			IRDMA_RING_MOVE_TAIL(cqp->sq_ring);
2764  			atomic64_inc(&cqp->completed_ops);
2765  			return 0;
2766  		}
2767  		udelay(cqp->dev->hw_attrs.max_sleep_count);
2768  	}
2769  
2770  	return -ETIMEDOUT;
2771  }
2772  
2773  /**
2774   * irdma_sc_decode_fpm_commit - decode a 64 bit value into count and base
2775   * @dev: sc device struct
2776   * @buf: pointer to commit buffer
2777   * @buf_idx: buffer index
2778   * @obj_info: object info pointer
2779   * @rsrc_idx: indexs of memory resource
2780   */
irdma_sc_decode_fpm_commit(struct irdma_sc_dev * dev,__le64 * buf,u32 buf_idx,struct irdma_hmc_obj_info * obj_info,u32 rsrc_idx)2781  static u64 irdma_sc_decode_fpm_commit(struct irdma_sc_dev *dev, __le64 *buf,
2782  				      u32 buf_idx, struct irdma_hmc_obj_info *obj_info,
2783  				      u32 rsrc_idx)
2784  {
2785  	u64 temp;
2786  
2787  	get_64bit_val(buf, buf_idx, &temp);
2788  
2789  	switch (rsrc_idx) {
2790  	case IRDMA_HMC_IW_QP:
2791  		obj_info[rsrc_idx].cnt = (u32)FIELD_GET(IRDMA_COMMIT_FPM_QPCNT, temp);
2792  		break;
2793  	case IRDMA_HMC_IW_CQ:
2794  		obj_info[rsrc_idx].cnt = (u32)FLD_RS_64(dev, temp, IRDMA_COMMIT_FPM_CQCNT);
2795  		break;
2796  	case IRDMA_HMC_IW_APBVT_ENTRY:
2797  		obj_info[rsrc_idx].cnt = 1;
2798  		break;
2799  	default:
2800  		obj_info[rsrc_idx].cnt = (u32)temp;
2801  		break;
2802  	}
2803  
2804  	obj_info[rsrc_idx].base = (temp >> IRDMA_COMMIT_FPM_BASE_S) * 512;
2805  
2806  	return temp;
2807  }
2808  
2809  /**
2810   * irdma_sc_parse_fpm_commit_buf - parse fpm commit buffer
2811   * @dev: pointer to dev struct
2812   * @buf: ptr to fpm commit buffer
2813   * @info: ptr to irdma_hmc_obj_info struct
2814   * @sd: number of SDs for HMC objects
2815   *
2816   * parses fpm commit info and copy base value
2817   * of hmc objects in hmc_info
2818   */
2819  static void
irdma_sc_parse_fpm_commit_buf(struct irdma_sc_dev * dev,__le64 * buf,struct irdma_hmc_obj_info * info,u32 * sd)2820  irdma_sc_parse_fpm_commit_buf(struct irdma_sc_dev *dev, __le64 *buf,
2821  			      struct irdma_hmc_obj_info *info, u32 *sd)
2822  {
2823  	u64 size;
2824  	u32 i;
2825  	u64 max_base = 0;
2826  	u32 last_hmc_obj = 0;
2827  
2828  	irdma_sc_decode_fpm_commit(dev, buf, 0, info,
2829  				   IRDMA_HMC_IW_QP);
2830  	irdma_sc_decode_fpm_commit(dev, buf, 8, info,
2831  				   IRDMA_HMC_IW_CQ);
2832  	/* skiping RSRVD */
2833  	irdma_sc_decode_fpm_commit(dev, buf, 24, info,
2834  				   IRDMA_HMC_IW_HTE);
2835  	irdma_sc_decode_fpm_commit(dev, buf, 32, info,
2836  				   IRDMA_HMC_IW_ARP);
2837  	irdma_sc_decode_fpm_commit(dev, buf, 40, info,
2838  				   IRDMA_HMC_IW_APBVT_ENTRY);
2839  	irdma_sc_decode_fpm_commit(dev, buf, 48, info,
2840  				   IRDMA_HMC_IW_MR);
2841  	irdma_sc_decode_fpm_commit(dev, buf, 56, info,
2842  				   IRDMA_HMC_IW_XF);
2843  	irdma_sc_decode_fpm_commit(dev, buf, 64, info,
2844  				   IRDMA_HMC_IW_XFFL);
2845  	irdma_sc_decode_fpm_commit(dev, buf, 72, info,
2846  				   IRDMA_HMC_IW_Q1);
2847  	irdma_sc_decode_fpm_commit(dev, buf, 80, info,
2848  				   IRDMA_HMC_IW_Q1FL);
2849  	irdma_sc_decode_fpm_commit(dev, buf, 88, info,
2850  				   IRDMA_HMC_IW_TIMER);
2851  	irdma_sc_decode_fpm_commit(dev, buf, 112, info,
2852  				   IRDMA_HMC_IW_PBLE);
2853  	/* skipping RSVD. */
2854  	if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1) {
2855  		irdma_sc_decode_fpm_commit(dev, buf, 96, info,
2856  					   IRDMA_HMC_IW_FSIMC);
2857  		irdma_sc_decode_fpm_commit(dev, buf, 104, info,
2858  					   IRDMA_HMC_IW_FSIAV);
2859  		irdma_sc_decode_fpm_commit(dev, buf, 128, info,
2860  					   IRDMA_HMC_IW_RRF);
2861  		irdma_sc_decode_fpm_commit(dev, buf, 136, info,
2862  					   IRDMA_HMC_IW_RRFFL);
2863  		irdma_sc_decode_fpm_commit(dev, buf, 144, info,
2864  					   IRDMA_HMC_IW_HDR);
2865  		irdma_sc_decode_fpm_commit(dev, buf, 152, info,
2866  					   IRDMA_HMC_IW_MD);
2867  		irdma_sc_decode_fpm_commit(dev, buf, 160, info,
2868  					   IRDMA_HMC_IW_OOISC);
2869  		irdma_sc_decode_fpm_commit(dev, buf, 168, info,
2870  					   IRDMA_HMC_IW_OOISCFFL);
2871  	}
2872  
2873  	/* searching for the last object in HMC to find the size of the HMC area. */
2874  	for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++) {
2875  		if (info[i].base > max_base) {
2876  			max_base = info[i].base;
2877  			last_hmc_obj = i;
2878  		}
2879  	}
2880  
2881  	size = info[last_hmc_obj].cnt * info[last_hmc_obj].size +
2882  	       info[last_hmc_obj].base;
2883  
2884  	if (size & 0x1FFFFF)
2885  		*sd = (u32)((size >> 21) + 1); /* add 1 for remainder */
2886  	else
2887  		*sd = (u32)(size >> 21);
2888  
2889  }
2890  
2891  /**
2892   * irdma_sc_decode_fpm_query() - Decode a 64 bit value into max count and size
2893   * @buf: ptr to fpm query buffer
2894   * @buf_idx: index into buf
2895   * @obj_info: ptr to irdma_hmc_obj_info struct
2896   * @rsrc_idx: resource index into info
2897   *
2898   * Decode a 64 bit value from fpm query buffer into max count and size
2899   */
irdma_sc_decode_fpm_query(__le64 * buf,u32 buf_idx,struct irdma_hmc_obj_info * obj_info,u32 rsrc_idx)2900  static u64 irdma_sc_decode_fpm_query(__le64 *buf, u32 buf_idx,
2901  				     struct irdma_hmc_obj_info *obj_info,
2902  				     u32 rsrc_idx)
2903  {
2904  	u64 temp;
2905  	u32 size;
2906  
2907  	get_64bit_val(buf, buf_idx, &temp);
2908  	obj_info[rsrc_idx].max_cnt = (u32)temp;
2909  	size = (u32)(temp >> 32);
2910  	obj_info[rsrc_idx].size = BIT_ULL(size);
2911  
2912  	return temp;
2913  }
2914  
2915  /**
2916   * irdma_sc_parse_fpm_query_buf() - parses fpm query buffer
2917   * @dev: ptr to shared code device
2918   * @buf: ptr to fpm query buffer
2919   * @hmc_info: ptr to irdma_hmc_obj_info struct
2920   * @hmc_fpm_misc: ptr to fpm data
2921   *
2922   * parses fpm query buffer and copy max_cnt and
2923   * size value of hmc objects in hmc_info
2924   */
irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev * dev,__le64 * buf,struct irdma_hmc_info * hmc_info,struct irdma_hmc_fpm_misc * hmc_fpm_misc)2925  static int irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
2926  					struct irdma_hmc_info *hmc_info,
2927  					struct irdma_hmc_fpm_misc *hmc_fpm_misc)
2928  {
2929  	struct irdma_hmc_obj_info *obj_info;
2930  	u64 temp;
2931  	u32 size;
2932  	u16 max_pe_sds;
2933  
2934  	obj_info = hmc_info->hmc_obj;
2935  
2936  	get_64bit_val(buf, 0, &temp);
2937  	hmc_info->first_sd_index = (u16)FIELD_GET(IRDMA_QUERY_FPM_FIRST_PE_SD_INDEX, temp);
2938  	max_pe_sds = (u16)FIELD_GET(IRDMA_QUERY_FPM_MAX_PE_SDS, temp);
2939  
2940  	hmc_fpm_misc->max_sds = max_pe_sds;
2941  	hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index;
2942  	get_64bit_val(buf, 8, &temp);
2943  	obj_info[IRDMA_HMC_IW_QP].max_cnt = (u32)FIELD_GET(IRDMA_QUERY_FPM_MAX_QPS, temp);
2944  	size = (u32)(temp >> 32);
2945  	obj_info[IRDMA_HMC_IW_QP].size = BIT_ULL(size);
2946  
2947  	get_64bit_val(buf, 16, &temp);
2948  	obj_info[IRDMA_HMC_IW_CQ].max_cnt = (u32)FIELD_GET(IRDMA_QUERY_FPM_MAX_CQS, temp);
2949  	size = (u32)(temp >> 32);
2950  	obj_info[IRDMA_HMC_IW_CQ].size = BIT_ULL(size);
2951  
2952  	irdma_sc_decode_fpm_query(buf, 32, obj_info, IRDMA_HMC_IW_HTE);
2953  	irdma_sc_decode_fpm_query(buf, 40, obj_info, IRDMA_HMC_IW_ARP);
2954  
2955  	obj_info[IRDMA_HMC_IW_APBVT_ENTRY].size = 8192;
2956  	obj_info[IRDMA_HMC_IW_APBVT_ENTRY].max_cnt = 1;
2957  
2958  	irdma_sc_decode_fpm_query(buf, 48, obj_info, IRDMA_HMC_IW_MR);
2959  	irdma_sc_decode_fpm_query(buf, 56, obj_info, IRDMA_HMC_IW_XF);
2960  
2961  	get_64bit_val(buf, 64, &temp);
2962  	obj_info[IRDMA_HMC_IW_XFFL].max_cnt = (u32)temp;
2963  	obj_info[IRDMA_HMC_IW_XFFL].size = 4;
2964  	hmc_fpm_misc->xf_block_size = FIELD_GET(IRDMA_QUERY_FPM_XFBLOCKSIZE, temp);
2965  	if (!hmc_fpm_misc->xf_block_size)
2966  		return -EINVAL;
2967  
2968  	irdma_sc_decode_fpm_query(buf, 72, obj_info, IRDMA_HMC_IW_Q1);
2969  	get_64bit_val(buf, 80, &temp);
2970  	obj_info[IRDMA_HMC_IW_Q1FL].max_cnt = (u32)temp;
2971  	obj_info[IRDMA_HMC_IW_Q1FL].size = 4;
2972  
2973  	hmc_fpm_misc->q1_block_size = FIELD_GET(IRDMA_QUERY_FPM_Q1BLOCKSIZE, temp);
2974  	if (!hmc_fpm_misc->q1_block_size)
2975  		return -EINVAL;
2976  
2977  	irdma_sc_decode_fpm_query(buf, 88, obj_info, IRDMA_HMC_IW_TIMER);
2978  
2979  	get_64bit_val(buf, 112, &temp);
2980  	obj_info[IRDMA_HMC_IW_PBLE].max_cnt = (u32)temp;
2981  	obj_info[IRDMA_HMC_IW_PBLE].size = 8;
2982  
2983  	get_64bit_val(buf, 120, &temp);
2984  	hmc_fpm_misc->max_ceqs = FIELD_GET(IRDMA_QUERY_FPM_MAX_CEQS, temp);
2985  	hmc_fpm_misc->ht_multiplier = FIELD_GET(IRDMA_QUERY_FPM_HTMULTIPLIER, temp);
2986  	hmc_fpm_misc->timer_bucket = FIELD_GET(IRDMA_QUERY_FPM_TIMERBUCKET, temp);
2987  	if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
2988  		return 0;
2989  	irdma_sc_decode_fpm_query(buf, 96, obj_info, IRDMA_HMC_IW_FSIMC);
2990  	irdma_sc_decode_fpm_query(buf, 104, obj_info, IRDMA_HMC_IW_FSIAV);
2991  	irdma_sc_decode_fpm_query(buf, 128, obj_info, IRDMA_HMC_IW_RRF);
2992  
2993  	get_64bit_val(buf, 136, &temp);
2994  	obj_info[IRDMA_HMC_IW_RRFFL].max_cnt = (u32)temp;
2995  	obj_info[IRDMA_HMC_IW_RRFFL].size = 4;
2996  	hmc_fpm_misc->rrf_block_size = FIELD_GET(IRDMA_QUERY_FPM_RRFBLOCKSIZE, temp);
2997  	if (!hmc_fpm_misc->rrf_block_size &&
2998  	    obj_info[IRDMA_HMC_IW_RRFFL].max_cnt)
2999  		return -EINVAL;
3000  
3001  	irdma_sc_decode_fpm_query(buf, 144, obj_info, IRDMA_HMC_IW_HDR);
3002  	irdma_sc_decode_fpm_query(buf, 152, obj_info, IRDMA_HMC_IW_MD);
3003  	irdma_sc_decode_fpm_query(buf, 160, obj_info, IRDMA_HMC_IW_OOISC);
3004  
3005  	get_64bit_val(buf, 168, &temp);
3006  	obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt = (u32)temp;
3007  	obj_info[IRDMA_HMC_IW_OOISCFFL].size = 4;
3008  	hmc_fpm_misc->ooiscf_block_size = FIELD_GET(IRDMA_QUERY_FPM_OOISCFBLOCKSIZE, temp);
3009  	if (!hmc_fpm_misc->ooiscf_block_size &&
3010  	    obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt)
3011  		return -EINVAL;
3012  
3013  	return 0;
3014  }
3015  
3016  /**
3017   * irdma_sc_find_reg_cq - find cq ctx index
3018   * @ceq: ceq sc structure
3019   * @cq: cq sc structure
3020   */
irdma_sc_find_reg_cq(struct irdma_sc_ceq * ceq,struct irdma_sc_cq * cq)3021  static u32 irdma_sc_find_reg_cq(struct irdma_sc_ceq *ceq,
3022  				struct irdma_sc_cq *cq)
3023  {
3024  	u32 i;
3025  
3026  	for (i = 0; i < ceq->reg_cq_size; i++) {
3027  		if (cq == ceq->reg_cq[i])
3028  			return i;
3029  	}
3030  
3031  	return IRDMA_INVALID_CQ_IDX;
3032  }
3033  
3034  /**
3035   * irdma_sc_add_cq_ctx - add cq ctx tracking for ceq
3036   * @ceq: ceq sc structure
3037   * @cq: cq sc structure
3038   */
irdma_sc_add_cq_ctx(struct irdma_sc_ceq * ceq,struct irdma_sc_cq * cq)3039  int irdma_sc_add_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq)
3040  {
3041  	unsigned long flags;
3042  
3043  	spin_lock_irqsave(&ceq->req_cq_lock, flags);
3044  
3045  	if (ceq->reg_cq_size == ceq->elem_cnt) {
3046  		spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
3047  		return -ENOMEM;
3048  	}
3049  
3050  	ceq->reg_cq[ceq->reg_cq_size++] = cq;
3051  
3052  	spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
3053  
3054  	return 0;
3055  }
3056  
3057  /**
3058   * irdma_sc_remove_cq_ctx - remove cq ctx tracking for ceq
3059   * @ceq: ceq sc structure
3060   * @cq: cq sc structure
3061   */
irdma_sc_remove_cq_ctx(struct irdma_sc_ceq * ceq,struct irdma_sc_cq * cq)3062  void irdma_sc_remove_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq)
3063  {
3064  	unsigned long flags;
3065  	u32 cq_ctx_idx;
3066  
3067  	spin_lock_irqsave(&ceq->req_cq_lock, flags);
3068  	cq_ctx_idx = irdma_sc_find_reg_cq(ceq, cq);
3069  	if (cq_ctx_idx == IRDMA_INVALID_CQ_IDX)
3070  		goto exit;
3071  
3072  	ceq->reg_cq_size--;
3073  	if (cq_ctx_idx != ceq->reg_cq_size)
3074  		ceq->reg_cq[cq_ctx_idx] = ceq->reg_cq[ceq->reg_cq_size];
3075  	ceq->reg_cq[ceq->reg_cq_size] = NULL;
3076  
3077  exit:
3078  	spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
3079  }
3080  
3081  /**
3082   * irdma_sc_cqp_init - Initialize buffers for a control Queue Pair
3083   * @cqp: IWARP control queue pair pointer
3084   * @info: IWARP control queue pair init info pointer
3085   *
3086   * Initializes the object and context buffers for a control Queue Pair.
3087   */
irdma_sc_cqp_init(struct irdma_sc_cqp * cqp,struct irdma_cqp_init_info * info)3088  int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
3089  		      struct irdma_cqp_init_info *info)
3090  {
3091  	u8 hw_sq_size;
3092  
3093  	if (info->sq_size > IRDMA_CQP_SW_SQSIZE_2048 ||
3094  	    info->sq_size < IRDMA_CQP_SW_SQSIZE_4 ||
3095  	    ((info->sq_size & (info->sq_size - 1))))
3096  		return -EINVAL;
3097  
3098  	hw_sq_size = irdma_get_encoded_wqe_size(info->sq_size,
3099  						IRDMA_QUEUE_TYPE_CQP);
3100  	cqp->size = sizeof(*cqp);
3101  	cqp->sq_size = info->sq_size;
3102  	cqp->hw_sq_size = hw_sq_size;
3103  	cqp->sq_base = info->sq;
3104  	cqp->host_ctx = info->host_ctx;
3105  	cqp->sq_pa = info->sq_pa;
3106  	cqp->host_ctx_pa = info->host_ctx_pa;
3107  	cqp->dev = info->dev;
3108  	cqp->struct_ver = info->struct_ver;
3109  	cqp->hw_maj_ver = info->hw_maj_ver;
3110  	cqp->hw_min_ver = info->hw_min_ver;
3111  	cqp->scratch_array = info->scratch_array;
3112  	cqp->polarity = 0;
3113  	cqp->en_datacenter_tcp = info->en_datacenter_tcp;
3114  	cqp->ena_vf_count = info->ena_vf_count;
3115  	cqp->hmc_profile = info->hmc_profile;
3116  	cqp->ceqs_per_vf = info->ceqs_per_vf;
3117  	cqp->disable_packed = info->disable_packed;
3118  	cqp->rocev2_rto_policy = info->rocev2_rto_policy;
3119  	cqp->protocol_used = info->protocol_used;
3120  	memcpy(&cqp->dcqcn_params, &info->dcqcn_params, sizeof(cqp->dcqcn_params));
3121  	info->dev->cqp = cqp;
3122  
3123  	IRDMA_RING_INIT(cqp->sq_ring, cqp->sq_size);
3124  	cqp->requested_ops = 0;
3125  	atomic64_set(&cqp->completed_ops, 0);
3126  	/* for the cqp commands backlog. */
3127  	INIT_LIST_HEAD(&cqp->dev->cqp_cmd_head);
3128  
3129  	writel(0, cqp->dev->hw_regs[IRDMA_CQPTAIL]);
3130  	writel(0, cqp->dev->hw_regs[IRDMA_CQPDB]);
3131  	writel(0, cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
3132  
3133  	ibdev_dbg(to_ibdev(cqp->dev),
3134  		  "WQE: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%pK] cqp[%p] polarity[x%04x]\n",
3135  		  cqp->sq_size, cqp->hw_sq_size, cqp->sq_base,
3136  		  (u64 *)(uintptr_t)cqp->sq_pa, cqp, cqp->polarity);
3137  	return 0;
3138  }
3139  
3140  /**
3141   * irdma_sc_cqp_create - create cqp during bringup
3142   * @cqp: struct for cqp hw
3143   * @maj_err: If error, major err number
3144   * @min_err: If error, minor err number
3145   */
irdma_sc_cqp_create(struct irdma_sc_cqp * cqp,u16 * maj_err,u16 * min_err)3146  int irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err)
3147  {
3148  	u64 temp;
3149  	u8 hw_rev;
3150  	u32 cnt = 0, p1, p2, val = 0, err_code;
3151  	int ret_code;
3152  
3153  	hw_rev = cqp->dev->hw_attrs.uk_attrs.hw_rev;
3154  	cqp->sdbuf.size = ALIGN(IRDMA_UPDATE_SD_BUFF_SIZE * cqp->sq_size,
3155  				IRDMA_SD_BUF_ALIGNMENT);
3156  	cqp->sdbuf.va = dma_alloc_coherent(cqp->dev->hw->device,
3157  					   cqp->sdbuf.size, &cqp->sdbuf.pa,
3158  					   GFP_KERNEL);
3159  	if (!cqp->sdbuf.va)
3160  		return -ENOMEM;
3161  
3162  	spin_lock_init(&cqp->dev->cqp_lock);
3163  
3164  	temp = FIELD_PREP(IRDMA_CQPHC_SQSIZE, cqp->hw_sq_size) |
3165  	       FIELD_PREP(IRDMA_CQPHC_SVER, cqp->struct_ver) |
3166  	       FIELD_PREP(IRDMA_CQPHC_DISABLE_PFPDUS, cqp->disable_packed) |
3167  	       FIELD_PREP(IRDMA_CQPHC_CEQPERVF, cqp->ceqs_per_vf);
3168  	if (hw_rev >= IRDMA_GEN_2) {
3169  		temp |= FIELD_PREP(IRDMA_CQPHC_ROCEV2_RTO_POLICY,
3170  				   cqp->rocev2_rto_policy) |
3171  			FIELD_PREP(IRDMA_CQPHC_PROTOCOL_USED,
3172  				   cqp->protocol_used);
3173  	}
3174  
3175  	set_64bit_val(cqp->host_ctx, 0, temp);
3176  	set_64bit_val(cqp->host_ctx, 8, cqp->sq_pa);
3177  
3178  	temp = FIELD_PREP(IRDMA_CQPHC_ENABLED_VFS, cqp->ena_vf_count) |
3179  	       FIELD_PREP(IRDMA_CQPHC_HMC_PROFILE, cqp->hmc_profile);
3180  	set_64bit_val(cqp->host_ctx, 16, temp);
3181  	set_64bit_val(cqp->host_ctx, 24, (uintptr_t)cqp);
3182  	temp = FIELD_PREP(IRDMA_CQPHC_HW_MAJVER, cqp->hw_maj_ver) |
3183  	       FIELD_PREP(IRDMA_CQPHC_HW_MINVER, cqp->hw_min_ver);
3184  	if (hw_rev >= IRDMA_GEN_2) {
3185  		temp |= FIELD_PREP(IRDMA_CQPHC_MIN_RATE, cqp->dcqcn_params.min_rate) |
3186  			FIELD_PREP(IRDMA_CQPHC_MIN_DEC_FACTOR, cqp->dcqcn_params.min_dec_factor);
3187  	}
3188  	set_64bit_val(cqp->host_ctx, 32, temp);
3189  	set_64bit_val(cqp->host_ctx, 40, 0);
3190  	temp = 0;
3191  	if (hw_rev >= IRDMA_GEN_2) {
3192  		temp |= FIELD_PREP(IRDMA_CQPHC_DCQCN_T, cqp->dcqcn_params.dcqcn_t) |
3193  			FIELD_PREP(IRDMA_CQPHC_RAI_FACTOR, cqp->dcqcn_params.rai_factor) |
3194  			FIELD_PREP(IRDMA_CQPHC_HAI_FACTOR, cqp->dcqcn_params.hai_factor);
3195  	}
3196  	set_64bit_val(cqp->host_ctx, 48, temp);
3197  	temp = 0;
3198  	if (hw_rev >= IRDMA_GEN_2) {
3199  		temp |= FIELD_PREP(IRDMA_CQPHC_DCQCN_B, cqp->dcqcn_params.dcqcn_b) |
3200  			FIELD_PREP(IRDMA_CQPHC_DCQCN_F, cqp->dcqcn_params.dcqcn_f) |
3201  			FIELD_PREP(IRDMA_CQPHC_CC_CFG_VALID, cqp->dcqcn_params.cc_cfg_valid) |
3202  			FIELD_PREP(IRDMA_CQPHC_RREDUCE_MPERIOD, cqp->dcqcn_params.rreduce_mperiod);
3203  	}
3204  	set_64bit_val(cqp->host_ctx, 56, temp);
3205  	print_hex_dump_debug("WQE: CQP_HOST_CTX WQE", DUMP_PREFIX_OFFSET, 16,
3206  			     8, cqp->host_ctx, IRDMA_CQP_CTX_SIZE * 8, false);
3207  	p1 = cqp->host_ctx_pa >> 32;
3208  	p2 = (u32)cqp->host_ctx_pa;
3209  
3210  	writel(p1, cqp->dev->hw_regs[IRDMA_CCQPHIGH]);
3211  	writel(p2, cqp->dev->hw_regs[IRDMA_CCQPLOW]);
3212  
3213  	do {
3214  		if (cnt++ > cqp->dev->hw_attrs.max_done_count) {
3215  			ret_code = -ETIMEDOUT;
3216  			goto err;
3217  		}
3218  		udelay(cqp->dev->hw_attrs.max_sleep_count);
3219  		val = readl(cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
3220  	} while (!val);
3221  
3222  	if (FLD_RS_32(cqp->dev, val, IRDMA_CCQPSTATUS_CCQP_ERR)) {
3223  		ret_code = -EOPNOTSUPP;
3224  		goto err;
3225  	}
3226  
3227  	cqp->process_cqp_sds = irdma_update_sds_noccq;
3228  	return 0;
3229  
3230  err:
3231  	dma_free_coherent(cqp->dev->hw->device, cqp->sdbuf.size,
3232  			  cqp->sdbuf.va, cqp->sdbuf.pa);
3233  	cqp->sdbuf.va = NULL;
3234  	err_code = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]);
3235  	*min_err = FIELD_GET(IRDMA_CQPERRCODES_CQP_MINOR_CODE, err_code);
3236  	*maj_err = FIELD_GET(IRDMA_CQPERRCODES_CQP_MAJOR_CODE, err_code);
3237  	return ret_code;
3238  }
3239  
3240  /**
3241   * irdma_sc_cqp_post_sq - post of cqp's sq
3242   * @cqp: struct for cqp hw
3243   */
irdma_sc_cqp_post_sq(struct irdma_sc_cqp * cqp)3244  void irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp)
3245  {
3246  	writel(IRDMA_RING_CURRENT_HEAD(cqp->sq_ring), cqp->dev->cqp_db);
3247  
3248  	ibdev_dbg(to_ibdev(cqp->dev),
3249  		  "WQE: CQP SQ head 0x%x tail 0x%x size 0x%x\n",
3250  		  cqp->sq_ring.head, cqp->sq_ring.tail, cqp->sq_ring.size);
3251  }
3252  
3253  /**
3254   * irdma_sc_cqp_get_next_send_wqe_idx - get next wqe on cqp sq
3255   * and pass back index
3256   * @cqp: CQP HW structure
3257   * @scratch: private data for CQP WQE
3258   * @wqe_idx: WQE index of CQP SQ
3259   */
irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp * cqp,u64 scratch,u32 * wqe_idx)3260  __le64 *irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch,
3261  					   u32 *wqe_idx)
3262  {
3263  	__le64 *wqe = NULL;
3264  	int ret_code;
3265  
3266  	if (IRDMA_RING_FULL_ERR(cqp->sq_ring)) {
3267  		ibdev_dbg(to_ibdev(cqp->dev),
3268  			  "WQE: CQP SQ is full, head 0x%x tail 0x%x size 0x%x\n",
3269  			  cqp->sq_ring.head, cqp->sq_ring.tail,
3270  			  cqp->sq_ring.size);
3271  		return NULL;
3272  	}
3273  	IRDMA_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, *wqe_idx, ret_code);
3274  	if (ret_code)
3275  		return NULL;
3276  
3277  	cqp->requested_ops++;
3278  	if (!*wqe_idx)
3279  		cqp->polarity = !cqp->polarity;
3280  	wqe = cqp->sq_base[*wqe_idx].elem;
3281  	cqp->scratch_array[*wqe_idx] = scratch;
3282  	IRDMA_CQP_INIT_WQE(wqe);
3283  
3284  	return wqe;
3285  }
3286  
3287  /**
3288   * irdma_sc_cqp_destroy - destroy cqp during close
3289   * @cqp: struct for cqp hw
3290   */
irdma_sc_cqp_destroy(struct irdma_sc_cqp * cqp)3291  int irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp)
3292  {
3293  	u32 cnt = 0, val;
3294  	int ret_code = 0;
3295  
3296  	writel(0, cqp->dev->hw_regs[IRDMA_CCQPHIGH]);
3297  	writel(0, cqp->dev->hw_regs[IRDMA_CCQPLOW]);
3298  	do {
3299  		if (cnt++ > cqp->dev->hw_attrs.max_done_count) {
3300  			ret_code = -ETIMEDOUT;
3301  			break;
3302  		}
3303  		udelay(cqp->dev->hw_attrs.max_sleep_count);
3304  		val = readl(cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
3305  	} while (FLD_RS_32(cqp->dev, val, IRDMA_CCQPSTATUS_CCQP_DONE));
3306  
3307  	dma_free_coherent(cqp->dev->hw->device, cqp->sdbuf.size,
3308  			  cqp->sdbuf.va, cqp->sdbuf.pa);
3309  	cqp->sdbuf.va = NULL;
3310  	return ret_code;
3311  }
3312  
3313  /**
3314   * irdma_sc_ccq_arm - enable intr for control cq
3315   * @ccq: ccq sc struct
3316   */
irdma_sc_ccq_arm(struct irdma_sc_cq * ccq)3317  void irdma_sc_ccq_arm(struct irdma_sc_cq *ccq)
3318  {
3319  	u64 temp_val;
3320  	u16 sw_cq_sel;
3321  	u8 arm_next_se;
3322  	u8 arm_seq_num;
3323  
3324  	get_64bit_val(ccq->cq_uk.shadow_area, 32, &temp_val);
3325  	sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
3326  	arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
3327  	arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
3328  	arm_seq_num++;
3329  	temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
3330  		   FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
3331  		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
3332  		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, 1);
3333  	set_64bit_val(ccq->cq_uk.shadow_area, 32, temp_val);
3334  
3335  	dma_wmb(); /* make sure shadow area is updated before arming */
3336  
3337  	writel(ccq->cq_uk.cq_id, ccq->dev->cq_arm_db);
3338  }
3339  
3340  /**
3341   * irdma_sc_ccq_get_cqe_info - get ccq's cq entry
3342   * @ccq: ccq sc struct
3343   * @info: completion q entry to return
3344   */
irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq * ccq,struct irdma_ccq_cqe_info * info)3345  int irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
3346  			      struct irdma_ccq_cqe_info *info)
3347  {
3348  	u64 qp_ctx, temp, temp1;
3349  	__le64 *cqe;
3350  	struct irdma_sc_cqp *cqp;
3351  	u32 wqe_idx;
3352  	u32 error;
3353  	u8 polarity;
3354  	int ret_code = 0;
3355  
3356  	if (ccq->cq_uk.avoid_mem_cflct)
3357  		cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(&ccq->cq_uk);
3358  	else
3359  		cqe = IRDMA_GET_CURRENT_CQ_ELEM(&ccq->cq_uk);
3360  
3361  	get_64bit_val(cqe, 24, &temp);
3362  	polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, temp);
3363  	if (polarity != ccq->cq_uk.polarity)
3364  		return -ENOENT;
3365  
3366  	/* Ensure CEQE contents are read after valid bit is checked */
3367  	dma_rmb();
3368  
3369  	get_64bit_val(cqe, 8, &qp_ctx);
3370  	cqp = (struct irdma_sc_cqp *)(unsigned long)qp_ctx;
3371  	info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, temp);
3372  	info->maj_err_code = IRDMA_CQPSQ_MAJ_NO_ERROR;
3373  	info->min_err_code = (u16)FIELD_GET(IRDMA_CQ_MINERR, temp);
3374  	if (info->error) {
3375  		info->maj_err_code = (u16)FIELD_GET(IRDMA_CQ_MAJERR, temp);
3376  		error = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]);
3377  		ibdev_dbg(to_ibdev(cqp->dev),
3378  			  "CQP: CQPERRCODES error_code[x%08X]\n", error);
3379  	}
3380  
3381  	wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, temp);
3382  	info->scratch = cqp->scratch_array[wqe_idx];
3383  
3384  	get_64bit_val(cqe, 16, &temp1);
3385  	info->op_ret_val = (u32)FIELD_GET(IRDMA_CCQ_OPRETVAL, temp1);
3386  	get_64bit_val(cqp->sq_base[wqe_idx].elem, 24, &temp1);
3387  	info->op_code = (u8)FIELD_GET(IRDMA_CQPSQ_OPCODE, temp1);
3388  	info->cqp = cqp;
3389  
3390  	/*  move the head for cq */
3391  	IRDMA_RING_MOVE_HEAD(ccq->cq_uk.cq_ring, ret_code);
3392  	if (!IRDMA_RING_CURRENT_HEAD(ccq->cq_uk.cq_ring))
3393  		ccq->cq_uk.polarity ^= 1;
3394  
3395  	/* update cq tail in cq shadow memory also */
3396  	IRDMA_RING_MOVE_TAIL(ccq->cq_uk.cq_ring);
3397  	set_64bit_val(ccq->cq_uk.shadow_area, 0,
3398  		      IRDMA_RING_CURRENT_HEAD(ccq->cq_uk.cq_ring));
3399  
3400  	dma_wmb(); /* make sure shadow area is updated before moving tail */
3401  
3402  	IRDMA_RING_MOVE_TAIL(cqp->sq_ring);
3403  	atomic64_inc(&cqp->completed_ops);
3404  
3405  	return ret_code;
3406  }
3407  
3408  /**
3409   * irdma_sc_poll_for_cqp_op_done - Waits for last write to complete in CQP SQ
3410   * @cqp: struct for cqp hw
3411   * @op_code: cqp opcode for completion
3412   * @compl_info: completion q entry to return
3413   */
irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp * cqp,u8 op_code,struct irdma_ccq_cqe_info * compl_info)3414  int irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 op_code,
3415  				  struct irdma_ccq_cqe_info *compl_info)
3416  {
3417  	struct irdma_ccq_cqe_info info = {};
3418  	struct irdma_sc_cq *ccq;
3419  	int ret_code = 0;
3420  	u32 cnt = 0;
3421  
3422  	ccq = cqp->dev->ccq;
3423  	while (1) {
3424  		if (cnt++ > 100 * cqp->dev->hw_attrs.max_done_count)
3425  			return -ETIMEDOUT;
3426  
3427  		if (irdma_sc_ccq_get_cqe_info(ccq, &info)) {
3428  			udelay(cqp->dev->hw_attrs.max_sleep_count);
3429  			continue;
3430  		}
3431  		if (info.error && info.op_code != IRDMA_CQP_OP_QUERY_STAG) {
3432  			ret_code = -EIO;
3433  			break;
3434  		}
3435  		/* make sure op code matches*/
3436  		if (op_code == info.op_code)
3437  			break;
3438  		ibdev_dbg(to_ibdev(cqp->dev),
3439  			  "WQE: opcode mismatch for my op code 0x%x, returned opcode %x\n",
3440  			  op_code, info.op_code);
3441  	}
3442  
3443  	if (compl_info)
3444  		memcpy(compl_info, &info, sizeof(*compl_info));
3445  
3446  	return ret_code;
3447  }
3448  
3449  /**
3450   * irdma_sc_manage_hmc_pm_func_table - manage of function table
3451   * @cqp: struct for cqp hw
3452   * @scratch: u64 saved to be used during cqp completion
3453   * @info: info for the manage function table operation
3454   * @post_sq: flag for cqp db to ring
3455   */
irdma_sc_manage_hmc_pm_func_table(struct irdma_sc_cqp * cqp,struct irdma_hmc_fcn_info * info,u64 scratch,bool post_sq)3456  static int irdma_sc_manage_hmc_pm_func_table(struct irdma_sc_cqp *cqp,
3457  					     struct irdma_hmc_fcn_info *info,
3458  					     u64 scratch, bool post_sq)
3459  {
3460  	__le64 *wqe;
3461  	u64 hdr;
3462  
3463  	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
3464  	if (!wqe)
3465  		return -ENOMEM;
3466  
3467  	set_64bit_val(wqe, 0, 0);
3468  	set_64bit_val(wqe, 8, 0);
3469  	set_64bit_val(wqe, 16, 0);
3470  	set_64bit_val(wqe, 32, 0);
3471  	set_64bit_val(wqe, 40, 0);
3472  	set_64bit_val(wqe, 48, 0);
3473  	set_64bit_val(wqe, 56, 0);
3474  
3475  	hdr = FIELD_PREP(IRDMA_CQPSQ_MHMC_VFIDX, info->vf_id) |
3476  	      FIELD_PREP(IRDMA_CQPSQ_OPCODE,
3477  			 IRDMA_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE) |
3478  	      FIELD_PREP(IRDMA_CQPSQ_MHMC_FREEPMFN, info->free_fcn) |
3479  	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
3480  	dma_wmb(); /* make sure WQE is written before valid bit is set */
3481  
3482  	set_64bit_val(wqe, 24, hdr);
3483  
3484  	print_hex_dump_debug("WQE: MANAGE_HMC_PM_FUNC_TABLE WQE",
3485  			     DUMP_PREFIX_OFFSET, 16, 8, wqe,
3486  			     IRDMA_CQP_WQE_SIZE * 8, false);
3487  	if (post_sq)
3488  		irdma_sc_cqp_post_sq(cqp);
3489  
3490  	return 0;
3491  }
3492  
3493  /**
3494   * irdma_sc_commit_fpm_val_done - wait for cqp eqe completion
3495   * for fpm commit
3496   * @cqp: struct for cqp hw
3497   */
irdma_sc_commit_fpm_val_done(struct irdma_sc_cqp * cqp)3498  static int irdma_sc_commit_fpm_val_done(struct irdma_sc_cqp *cqp)
3499  {
3500  	return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_COMMIT_FPM_VAL,
3501  					     NULL);
3502  }
3503  
3504  /**
3505   * irdma_sc_commit_fpm_val - cqp wqe for commit fpm values
3506   * @cqp: struct for cqp hw
3507   * @scratch: u64 saved to be used during cqp completion
3508   * @hmc_fn_id: hmc function id
3509   * @commit_fpm_mem: Memory for fpm values
3510   * @post_sq: flag for cqp db to ring
3511   * @wait_type: poll ccq or cqp registers for cqp completion
3512   */
irdma_sc_commit_fpm_val(struct irdma_sc_cqp * cqp,u64 scratch,u8 hmc_fn_id,struct irdma_dma_mem * commit_fpm_mem,bool post_sq,u8 wait_type)3513  static int irdma_sc_commit_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch,
3514  				   u8 hmc_fn_id,
3515  				   struct irdma_dma_mem *commit_fpm_mem,
3516  				   bool post_sq, u8 wait_type)
3517  {
3518  	__le64 *wqe;
3519  	u64 hdr;
3520  	u32 tail, val, error;
3521  	int ret_code = 0;
3522  
3523  	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
3524  	if (!wqe)
3525  		return -ENOMEM;
3526  
3527  	set_64bit_val(wqe, 16, hmc_fn_id);
3528  	set_64bit_val(wqe, 32, commit_fpm_mem->pa);
3529  
3530  	hdr = FIELD_PREP(IRDMA_CQPSQ_BUFSIZE, IRDMA_COMMIT_FPM_BUF_SIZE) |
3531  	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_COMMIT_FPM_VAL) |
3532  	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
3533  
3534  	dma_wmb(); /* make sure WQE is written before valid bit is set */
3535  
3536  	set_64bit_val(wqe, 24, hdr);
3537  
3538  	print_hex_dump_debug("WQE: COMMIT_FPM_VAL WQE", DUMP_PREFIX_OFFSET,
3539  			     16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
3540  	irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
3541  
3542  	if (post_sq) {
3543  		irdma_sc_cqp_post_sq(cqp);
3544  		if (wait_type == IRDMA_CQP_WAIT_POLL_REGS)
3545  			ret_code = irdma_cqp_poll_registers(cqp, tail,
3546  							    cqp->dev->hw_attrs.max_done_count);
3547  		else if (wait_type == IRDMA_CQP_WAIT_POLL_CQ)
3548  			ret_code = irdma_sc_commit_fpm_val_done(cqp);
3549  	}
3550  
3551  	return ret_code;
3552  }
3553  
3554  /**
3555   * irdma_sc_query_fpm_val_done - poll for cqp wqe completion for
3556   * query fpm
3557   * @cqp: struct for cqp hw
3558   */
irdma_sc_query_fpm_val_done(struct irdma_sc_cqp * cqp)3559  static int irdma_sc_query_fpm_val_done(struct irdma_sc_cqp *cqp)
3560  {
3561  	return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_QUERY_FPM_VAL,
3562  					     NULL);
3563  }
3564  
3565  /**
3566   * irdma_sc_query_fpm_val - cqp wqe query fpm values
3567   * @cqp: struct for cqp hw
3568   * @scratch: u64 saved to be used during cqp completion
3569   * @hmc_fn_id: hmc function id
3570   * @query_fpm_mem: memory for return fpm values
3571   * @post_sq: flag for cqp db to ring
3572   * @wait_type: poll ccq or cqp registers for cqp completion
3573   */
irdma_sc_query_fpm_val(struct irdma_sc_cqp * cqp,u64 scratch,u8 hmc_fn_id,struct irdma_dma_mem * query_fpm_mem,bool post_sq,u8 wait_type)3574  static int irdma_sc_query_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch,
3575  				  u8 hmc_fn_id,
3576  				  struct irdma_dma_mem *query_fpm_mem,
3577  				  bool post_sq, u8 wait_type)
3578  {
3579  	__le64 *wqe;
3580  	u64 hdr;
3581  	u32 tail, val, error;
3582  	int ret_code = 0;
3583  
3584  	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
3585  	if (!wqe)
3586  		return -ENOMEM;
3587  
3588  	set_64bit_val(wqe, 16, hmc_fn_id);
3589  	set_64bit_val(wqe, 32, query_fpm_mem->pa);
3590  
3591  	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_QUERY_FPM_VAL) |
3592  	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
3593  	dma_wmb(); /* make sure WQE is written before valid bit is set */
3594  
3595  	set_64bit_val(wqe, 24, hdr);
3596  
3597  	print_hex_dump_debug("WQE: QUERY_FPM WQE", DUMP_PREFIX_OFFSET, 16, 8,
3598  			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
3599  	irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
3600  
3601  	if (post_sq) {
3602  		irdma_sc_cqp_post_sq(cqp);
3603  		if (wait_type == IRDMA_CQP_WAIT_POLL_REGS)
3604  			ret_code = irdma_cqp_poll_registers(cqp, tail,
3605  							    cqp->dev->hw_attrs.max_done_count);
3606  		else if (wait_type == IRDMA_CQP_WAIT_POLL_CQ)
3607  			ret_code = irdma_sc_query_fpm_val_done(cqp);
3608  	}
3609  
3610  	return ret_code;
3611  }
3612  
3613  /**
3614   * irdma_sc_ceq_init - initialize ceq
3615   * @ceq: ceq sc structure
3616   * @info: ceq initialization info
3617   */
irdma_sc_ceq_init(struct irdma_sc_ceq * ceq,struct irdma_ceq_init_info * info)3618  int irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
3619  		      struct irdma_ceq_init_info *info)
3620  {
3621  	u32 pble_obj_cnt;
3622  
3623  	if (info->elem_cnt < info->dev->hw_attrs.min_hw_ceq_size ||
3624  	    info->elem_cnt > info->dev->hw_attrs.max_hw_ceq_size)
3625  		return -EINVAL;
3626  
3627  	if (info->ceq_id >= info->dev->hmc_fpm_misc.max_ceqs)
3628  		return -EINVAL;
3629  	pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
3630  
3631  	if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
3632  		return -EINVAL;
3633  
3634  	ceq->size = sizeof(*ceq);
3635  	ceq->ceqe_base = (struct irdma_ceqe *)info->ceqe_base;
3636  	ceq->ceq_id = info->ceq_id;
3637  	ceq->dev = info->dev;
3638  	ceq->elem_cnt = info->elem_cnt;
3639  	ceq->ceq_elem_pa = info->ceqe_pa;
3640  	ceq->virtual_map = info->virtual_map;
3641  	ceq->itr_no_expire = info->itr_no_expire;
3642  	ceq->reg_cq = info->reg_cq;
3643  	ceq->reg_cq_size = 0;
3644  	spin_lock_init(&ceq->req_cq_lock);
3645  	ceq->pbl_chunk_size = (ceq->virtual_map ? info->pbl_chunk_size : 0);
3646  	ceq->first_pm_pbl_idx = (ceq->virtual_map ? info->first_pm_pbl_idx : 0);
3647  	ceq->pbl_list = (ceq->virtual_map ? info->pbl_list : NULL);
3648  	ceq->tph_en = info->tph_en;
3649  	ceq->tph_val = info->tph_val;
3650  	ceq->vsi = info->vsi;
3651  	ceq->polarity = 1;
3652  	IRDMA_RING_INIT(ceq->ceq_ring, ceq->elem_cnt);
3653  	ceq->dev->ceq[info->ceq_id] = ceq;
3654  
3655  	return 0;
3656  }
3657  
3658  /**
3659   * irdma_sc_ceq_create - create ceq wqe
3660   * @ceq: ceq sc structure
3661   * @scratch: u64 saved to be used during cqp completion
3662   * @post_sq: flag for cqp db to ring
3663   */
3664  
irdma_sc_ceq_create(struct irdma_sc_ceq * ceq,u64 scratch,bool post_sq)3665  static int irdma_sc_ceq_create(struct irdma_sc_ceq *ceq, u64 scratch,
3666  			       bool post_sq)
3667  {
3668  	struct irdma_sc_cqp *cqp;
3669  	__le64 *wqe;
3670  	u64 hdr;
3671  
3672  	cqp = ceq->dev->cqp;
3673  	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
3674  	if (!wqe)
3675  		return -ENOMEM;
3676  	set_64bit_val(wqe, 16, ceq->elem_cnt);
3677  	set_64bit_val(wqe, 32,
3678  		      (ceq->virtual_map ? 0 : ceq->ceq_elem_pa));
3679  	set_64bit_val(wqe, 48,
3680  		      (ceq->virtual_map ? ceq->first_pm_pbl_idx : 0));
3681  	set_64bit_val(wqe, 56,
3682  		      FIELD_PREP(IRDMA_CQPSQ_TPHVAL, ceq->tph_val) |
3683  		      FIELD_PREP(IRDMA_CQPSQ_VSIIDX, ceq->vsi->vsi_idx));
3684  	hdr = FIELD_PREP(IRDMA_CQPSQ_CEQ_CEQID, ceq->ceq_id) |
3685  	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CEQ) |
3686  	      FIELD_PREP(IRDMA_CQPSQ_CEQ_LPBLSIZE, ceq->pbl_chunk_size) |
3687  	      FIELD_PREP(IRDMA_CQPSQ_CEQ_VMAP, ceq->virtual_map) |
3688  	      FIELD_PREP(IRDMA_CQPSQ_CEQ_ITRNOEXPIRE, ceq->itr_no_expire) |
3689  	      FIELD_PREP(IRDMA_CQPSQ_TPHEN, ceq->tph_en) |
3690  	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
3691  	dma_wmb(); /* make sure WQE is written before valid bit is set */
3692  
3693  	set_64bit_val(wqe, 24, hdr);
3694  
3695  	print_hex_dump_debug("WQE: CEQ_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8,
3696  			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
3697  	if (post_sq)
3698  		irdma_sc_cqp_post_sq(cqp);
3699  
3700  	return 0;
3701  }
3702  
3703  /**
3704   * irdma_sc_cceq_create_done - poll for control ceq wqe to complete
3705   * @ceq: ceq sc structure
3706   */
irdma_sc_cceq_create_done(struct irdma_sc_ceq * ceq)3707  static int irdma_sc_cceq_create_done(struct irdma_sc_ceq *ceq)
3708  {
3709  	struct irdma_sc_cqp *cqp;
3710  
3711  	cqp = ceq->dev->cqp;
3712  	return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_CREATE_CEQ,
3713  					     NULL);
3714  }
3715  
3716  /**
3717   * irdma_sc_cceq_destroy_done - poll for destroy cceq to complete
3718   * @ceq: ceq sc structure
3719   */
irdma_sc_cceq_destroy_done(struct irdma_sc_ceq * ceq)3720  int irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq)
3721  {
3722  	struct irdma_sc_cqp *cqp;
3723  
3724  	if (ceq->reg_cq)
3725  		irdma_sc_remove_cq_ctx(ceq, ceq->dev->ccq);
3726  
3727  	cqp = ceq->dev->cqp;
3728  	cqp->process_cqp_sds = irdma_update_sds_noccq;
3729  
3730  	return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_DESTROY_CEQ,
3731  					     NULL);
3732  }
3733  
3734  /**
3735   * irdma_sc_cceq_create - create cceq
3736   * @ceq: ceq sc structure
3737   * @scratch: u64 saved to be used during cqp completion
3738   */
irdma_sc_cceq_create(struct irdma_sc_ceq * ceq,u64 scratch)3739  int irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch)
3740  {
3741  	int ret_code;
3742  	struct irdma_sc_dev *dev = ceq->dev;
3743  
3744  	dev->ccq->vsi = ceq->vsi;
3745  	if (ceq->reg_cq) {
3746  		ret_code = irdma_sc_add_cq_ctx(ceq, ceq->dev->ccq);
3747  		if (ret_code)
3748  			return ret_code;
3749  	}
3750  
3751  	ret_code = irdma_sc_ceq_create(ceq, scratch, true);
3752  	if (!ret_code)
3753  		return irdma_sc_cceq_create_done(ceq);
3754  
3755  	return ret_code;
3756  }
3757  
3758  /**
3759   * irdma_sc_ceq_destroy - destroy ceq
3760   * @ceq: ceq sc structure
3761   * @scratch: u64 saved to be used during cqp completion
3762   * @post_sq: flag for cqp db to ring
3763   */
irdma_sc_ceq_destroy(struct irdma_sc_ceq * ceq,u64 scratch,bool post_sq)3764  int irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch, bool post_sq)
3765  {
3766  	struct irdma_sc_cqp *cqp;
3767  	__le64 *wqe;
3768  	u64 hdr;
3769  
3770  	cqp = ceq->dev->cqp;
3771  	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
3772  	if (!wqe)
3773  		return -ENOMEM;
3774  
3775  	set_64bit_val(wqe, 16, ceq->elem_cnt);
3776  	set_64bit_val(wqe, 48, ceq->first_pm_pbl_idx);
3777  	hdr = ceq->ceq_id |
3778  	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_CEQ) |
3779  	      FIELD_PREP(IRDMA_CQPSQ_CEQ_LPBLSIZE, ceq->pbl_chunk_size) |
3780  	      FIELD_PREP(IRDMA_CQPSQ_CEQ_VMAP, ceq->virtual_map) |
3781  	      FIELD_PREP(IRDMA_CQPSQ_TPHEN, ceq->tph_en) |
3782  	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
3783  	dma_wmb(); /* make sure WQE is written before valid bit is set */
3784  
3785  	set_64bit_val(wqe, 24, hdr);
3786  
3787  	print_hex_dump_debug("WQE: CEQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16,
3788  			     8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
3789  	if (post_sq)
3790  		irdma_sc_cqp_post_sq(cqp);
3791  
3792  	return 0;
3793  }
3794  
3795  /**
3796   * irdma_sc_process_ceq - process ceq
3797   * @dev: sc device struct
3798   * @ceq: ceq sc structure
3799   *
3800   * It is expected caller serializes this function with cleanup_ceqes()
3801   * because these functions manipulate the same ceq
3802   */
irdma_sc_process_ceq(struct irdma_sc_dev * dev,struct irdma_sc_ceq * ceq)3803  void *irdma_sc_process_ceq(struct irdma_sc_dev *dev, struct irdma_sc_ceq *ceq)
3804  {
3805  	u64 temp;
3806  	__le64 *ceqe;
3807  	struct irdma_sc_cq *cq = NULL;
3808  	struct irdma_sc_cq *temp_cq;
3809  	u8 polarity;
3810  	u32 cq_idx;
3811  	unsigned long flags;
3812  
3813  	do {
3814  		cq_idx = 0;
3815  		ceqe = IRDMA_GET_CURRENT_CEQ_ELEM(ceq);
3816  		get_64bit_val(ceqe, 0, &temp);
3817  		polarity = (u8)FIELD_GET(IRDMA_CEQE_VALID, temp);
3818  		if (polarity != ceq->polarity)
3819  			return NULL;
3820  
3821  		temp_cq = (struct irdma_sc_cq *)(unsigned long)(temp << 1);
3822  		if (!temp_cq) {
3823  			cq_idx = IRDMA_INVALID_CQ_IDX;
3824  			IRDMA_RING_MOVE_TAIL(ceq->ceq_ring);
3825  
3826  			if (!IRDMA_RING_CURRENT_TAIL(ceq->ceq_ring))
3827  				ceq->polarity ^= 1;
3828  			continue;
3829  		}
3830  
3831  		cq = temp_cq;
3832  		if (ceq->reg_cq) {
3833  			spin_lock_irqsave(&ceq->req_cq_lock, flags);
3834  			cq_idx = irdma_sc_find_reg_cq(ceq, cq);
3835  			spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
3836  		}
3837  
3838  		IRDMA_RING_MOVE_TAIL(ceq->ceq_ring);
3839  		if (!IRDMA_RING_CURRENT_TAIL(ceq->ceq_ring))
3840  			ceq->polarity ^= 1;
3841  	} while (cq_idx == IRDMA_INVALID_CQ_IDX);
3842  
3843  	if (cq)
3844  		irdma_sc_cq_ack(cq);
3845  	return cq;
3846  }
3847  
3848  /**
3849   * irdma_sc_cleanup_ceqes - clear the valid ceqes ctx matching the cq
3850   * @cq: cq for which the ceqes need to be cleaned up
3851   * @ceq: ceq ptr
3852   *
3853   * The function is called after the cq is destroyed to cleanup
3854   * its pending ceqe entries. It is expected caller serializes this
3855   * function with process_ceq() in interrupt context.
3856   */
irdma_sc_cleanup_ceqes(struct irdma_sc_cq * cq,struct irdma_sc_ceq * ceq)3857  void irdma_sc_cleanup_ceqes(struct irdma_sc_cq *cq, struct irdma_sc_ceq *ceq)
3858  {
3859  	struct irdma_sc_cq *next_cq;
3860  	u8 ceq_polarity = ceq->polarity;
3861  	__le64 *ceqe;
3862  	u8 polarity;
3863  	u64 temp;
3864  	int next;
3865  	u32 i;
3866  
3867  	next = IRDMA_RING_GET_NEXT_TAIL(ceq->ceq_ring, 0);
3868  
3869  	for (i = 1; i <= IRDMA_RING_SIZE(*ceq); i++) {
3870  		ceqe = IRDMA_GET_CEQ_ELEM_AT_POS(ceq, next);
3871  
3872  		get_64bit_val(ceqe, 0, &temp);
3873  		polarity = (u8)FIELD_GET(IRDMA_CEQE_VALID, temp);
3874  		if (polarity != ceq_polarity)
3875  			return;
3876  
3877  		next_cq = (struct irdma_sc_cq *)(unsigned long)(temp << 1);
3878  		if (cq == next_cq)
3879  			set_64bit_val(ceqe, 0, temp & IRDMA_CEQE_VALID);
3880  
3881  		next = IRDMA_RING_GET_NEXT_TAIL(ceq->ceq_ring, i);
3882  		if (!next)
3883  			ceq_polarity ^= 1;
3884  	}
3885  }
3886  
3887  /**
3888   * irdma_sc_aeq_init - initialize aeq
3889   * @aeq: aeq structure ptr
3890   * @info: aeq initialization info
3891   */
irdma_sc_aeq_init(struct irdma_sc_aeq * aeq,struct irdma_aeq_init_info * info)3892  int irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
3893  		      struct irdma_aeq_init_info *info)
3894  {
3895  	u32 pble_obj_cnt;
3896  
3897  	if (info->elem_cnt < info->dev->hw_attrs.min_hw_aeq_size ||
3898  	    info->elem_cnt > info->dev->hw_attrs.max_hw_aeq_size)
3899  		return -EINVAL;
3900  
3901  	pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
3902  
3903  	if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
3904  		return -EINVAL;
3905  
3906  	aeq->size = sizeof(*aeq);
3907  	aeq->polarity = 1;
3908  	aeq->aeqe_base = (struct irdma_sc_aeqe *)info->aeqe_base;
3909  	aeq->dev = info->dev;
3910  	aeq->elem_cnt = info->elem_cnt;
3911  	aeq->aeq_elem_pa = info->aeq_elem_pa;
3912  	IRDMA_RING_INIT(aeq->aeq_ring, aeq->elem_cnt);
3913  	aeq->virtual_map = info->virtual_map;
3914  	aeq->pbl_list = (aeq->virtual_map ? info->pbl_list : NULL);
3915  	aeq->pbl_chunk_size = (aeq->virtual_map ? info->pbl_chunk_size : 0);
3916  	aeq->first_pm_pbl_idx = (aeq->virtual_map ? info->first_pm_pbl_idx : 0);
3917  	aeq->msix_idx = info->msix_idx;
3918  	info->dev->aeq = aeq;
3919  
3920  	return 0;
3921  }
3922  
3923  /**
3924   * irdma_sc_aeq_create - create aeq
3925   * @aeq: aeq structure ptr
3926   * @scratch: u64 saved to be used during cqp completion
3927   * @post_sq: flag for cqp db to ring
3928   */
irdma_sc_aeq_create(struct irdma_sc_aeq * aeq,u64 scratch,bool post_sq)3929  static int irdma_sc_aeq_create(struct irdma_sc_aeq *aeq, u64 scratch,
3930  			       bool post_sq)
3931  {
3932  	__le64 *wqe;
3933  	struct irdma_sc_cqp *cqp;
3934  	u64 hdr;
3935  
3936  	cqp = aeq->dev->cqp;
3937  	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
3938  	if (!wqe)
3939  		return -ENOMEM;
3940  	set_64bit_val(wqe, 16, aeq->elem_cnt);
3941  	set_64bit_val(wqe, 32,
3942  		      (aeq->virtual_map ? 0 : aeq->aeq_elem_pa));
3943  	set_64bit_val(wqe, 48,
3944  		      (aeq->virtual_map ? aeq->first_pm_pbl_idx : 0));
3945  
3946  	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_AEQ) |
3947  	      FIELD_PREP(IRDMA_CQPSQ_AEQ_LPBLSIZE, aeq->pbl_chunk_size) |
3948  	      FIELD_PREP(IRDMA_CQPSQ_AEQ_VMAP, aeq->virtual_map) |
3949  	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
3950  	dma_wmb(); /* make sure WQE is written before valid bit is set */
3951  
3952  	set_64bit_val(wqe, 24, hdr);
3953  
3954  	print_hex_dump_debug("WQE: AEQ_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8,
3955  			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
3956  	if (post_sq)
3957  		irdma_sc_cqp_post_sq(cqp);
3958  
3959  	return 0;
3960  }
3961  
3962  /**
3963   * irdma_sc_aeq_destroy - destroy aeq during close
3964   * @aeq: aeq structure ptr
3965   * @scratch: u64 saved to be used during cqp completion
3966   * @post_sq: flag for cqp db to ring
3967   */
irdma_sc_aeq_destroy(struct irdma_sc_aeq * aeq,u64 scratch,bool post_sq)3968  static int irdma_sc_aeq_destroy(struct irdma_sc_aeq *aeq, u64 scratch,
3969  				bool post_sq)
3970  {
3971  	__le64 *wqe;
3972  	struct irdma_sc_cqp *cqp;
3973  	struct irdma_sc_dev *dev;
3974  	u64 hdr;
3975  
3976  	dev = aeq->dev;
3977  	writel(0, dev->hw_regs[IRDMA_PFINT_AEQCTL]);
3978  
3979  	cqp = dev->cqp;
3980  	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
3981  	if (!wqe)
3982  		return -ENOMEM;
3983  	set_64bit_val(wqe, 16, aeq->elem_cnt);
3984  	set_64bit_val(wqe, 48, aeq->first_pm_pbl_idx);
3985  	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_AEQ) |
3986  	      FIELD_PREP(IRDMA_CQPSQ_AEQ_LPBLSIZE, aeq->pbl_chunk_size) |
3987  	      FIELD_PREP(IRDMA_CQPSQ_AEQ_VMAP, aeq->virtual_map) |
3988  	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
3989  	dma_wmb(); /* make sure WQE is written before valid bit is set */
3990  
3991  	set_64bit_val(wqe, 24, hdr);
3992  
3993  	print_hex_dump_debug("WQE: AEQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16,
3994  			     8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
3995  	if (post_sq)
3996  		irdma_sc_cqp_post_sq(cqp);
3997  	return 0;
3998  }
3999  
4000  /**
4001   * irdma_sc_get_next_aeqe - get next aeq entry
4002   * @aeq: aeq structure ptr
4003   * @info: aeqe info to be returned
4004   */
irdma_sc_get_next_aeqe(struct irdma_sc_aeq * aeq,struct irdma_aeqe_info * info)4005  int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
4006  			   struct irdma_aeqe_info *info)
4007  {
4008  	u64 temp, compl_ctx;
4009  	__le64 *aeqe;
4010  	u8 ae_src;
4011  	u8 polarity;
4012  
4013  	aeqe = IRDMA_GET_CURRENT_AEQ_ELEM(aeq);
4014  	get_64bit_val(aeqe, 8, &temp);
4015  	polarity = (u8)FIELD_GET(IRDMA_AEQE_VALID, temp);
4016  
4017  	if (aeq->polarity != polarity)
4018  		return -ENOENT;
4019  
4020  	/* Ensure AEQE contents are read after valid bit is checked */
4021  	dma_rmb();
4022  
4023  	get_64bit_val(aeqe, 0, &compl_ctx);
4024  
4025  	print_hex_dump_debug("WQE: AEQ_ENTRY WQE", DUMP_PREFIX_OFFSET, 16, 8,
4026  			     aeqe, 16, false);
4027  
4028  	ae_src = (u8)FIELD_GET(IRDMA_AEQE_AESRC, temp);
4029  	info->wqe_idx = (u16)FIELD_GET(IRDMA_AEQE_WQDESCIDX, temp);
4030  	info->qp_cq_id = (u32)FIELD_GET(IRDMA_AEQE_QPCQID_LOW, temp) |
4031  			 ((u32)FIELD_GET(IRDMA_AEQE_QPCQID_HI, temp) << 18);
4032  	info->ae_id = (u16)FIELD_GET(IRDMA_AEQE_AECODE, temp);
4033  	info->tcp_state = (u8)FIELD_GET(IRDMA_AEQE_TCPSTATE, temp);
4034  	info->iwarp_state = (u8)FIELD_GET(IRDMA_AEQE_IWSTATE, temp);
4035  	info->q2_data_written = (u8)FIELD_GET(IRDMA_AEQE_Q2DATA, temp);
4036  	info->aeqe_overflow = (bool)FIELD_GET(IRDMA_AEQE_OVERFLOW, temp);
4037  
4038  	info->ae_src = ae_src;
4039  	switch (info->ae_id) {
4040  	case IRDMA_AE_PRIV_OPERATION_DENIED:
4041  	case IRDMA_AE_AMP_INVALIDATE_TYPE1_MW:
4042  	case IRDMA_AE_AMP_MWBIND_ZERO_BASED_TYPE1_MW:
4043  	case IRDMA_AE_AMP_FASTREG_INVALID_PBL_HPS_CFG:
4044  	case IRDMA_AE_AMP_FASTREG_PBLE_MISMATCH:
4045  	case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
4046  	case IRDMA_AE_UDA_XMIT_BAD_PD:
4047  	case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
4048  	case IRDMA_AE_BAD_CLOSE:
4049  	case IRDMA_AE_RDMA_READ_WHILE_ORD_ZERO:
4050  	case IRDMA_AE_STAG_ZERO_INVALID:
4051  	case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
4052  	case IRDMA_AE_IB_INVALID_REQUEST:
4053  	case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
4054  	case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
4055  	case IRDMA_AE_IB_REMOTE_OP_ERROR:
4056  	case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION:
4057  	case IRDMA_AE_DDP_UBE_INVALID_MO:
4058  	case IRDMA_AE_DDP_UBE_INVALID_QN:
4059  	case IRDMA_AE_DDP_NO_L_BIT:
4060  	case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
4061  	case IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
4062  	case IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST:
4063  	case IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
4064  	case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
4065  	case IRDMA_AE_INVALID_ARP_ENTRY:
4066  	case IRDMA_AE_INVALID_TCP_OPTION_RCVD:
4067  	case IRDMA_AE_STALE_ARP_ENTRY:
4068  	case IRDMA_AE_INVALID_AH_ENTRY:
4069  	case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
4070  	case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
4071  	case IRDMA_AE_LLP_TOO_MANY_RETRIES:
4072  	case IRDMA_AE_LLP_DOUBT_REACHABILITY:
4073  	case IRDMA_AE_LLP_CONNECTION_ESTABLISHED:
4074  	case IRDMA_AE_RESET_SENT:
4075  	case IRDMA_AE_TERMINATE_SENT:
4076  	case IRDMA_AE_RESET_NOT_SENT:
4077  	case IRDMA_AE_LCE_QP_CATASTROPHIC:
4078  	case IRDMA_AE_QP_SUSPEND_COMPLETE:
4079  	case IRDMA_AE_UDA_L4LEN_INVALID:
4080  		info->qp = true;
4081  		info->compl_ctx = compl_ctx;
4082  		break;
4083  	case IRDMA_AE_LCE_CQ_CATASTROPHIC:
4084  		info->cq = true;
4085  		info->compl_ctx = compl_ctx << 1;
4086  		ae_src = IRDMA_AE_SOURCE_RSVD;
4087  		break;
4088  	case IRDMA_AE_ROCE_EMPTY_MCG:
4089  	case IRDMA_AE_ROCE_BAD_MC_IP_ADDR:
4090  	case IRDMA_AE_ROCE_BAD_MC_QPID:
4091  	case IRDMA_AE_MCG_QP_PROTOCOL_MISMATCH:
4092  		fallthrough;
4093  	case IRDMA_AE_LLP_CONNECTION_RESET:
4094  	case IRDMA_AE_LLP_SYN_RECEIVED:
4095  	case IRDMA_AE_LLP_FIN_RECEIVED:
4096  	case IRDMA_AE_LLP_CLOSE_COMPLETE:
4097  	case IRDMA_AE_LLP_TERMINATE_RECEIVED:
4098  	case IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE:
4099  		ae_src = IRDMA_AE_SOURCE_RSVD;
4100  		info->qp = true;
4101  		info->compl_ctx = compl_ctx;
4102  		break;
4103  	default:
4104  		break;
4105  	}
4106  
4107  	switch (ae_src) {
4108  	case IRDMA_AE_SOURCE_RQ:
4109  	case IRDMA_AE_SOURCE_RQ_0011:
4110  		info->qp = true;
4111  		info->rq = true;
4112  		info->compl_ctx = compl_ctx;
4113  		break;
4114  	case IRDMA_AE_SOURCE_CQ:
4115  	case IRDMA_AE_SOURCE_CQ_0110:
4116  	case IRDMA_AE_SOURCE_CQ_1010:
4117  	case IRDMA_AE_SOURCE_CQ_1110:
4118  		info->cq = true;
4119  		info->compl_ctx = compl_ctx << 1;
4120  		break;
4121  	case IRDMA_AE_SOURCE_SQ:
4122  	case IRDMA_AE_SOURCE_SQ_0111:
4123  		info->qp = true;
4124  		info->sq = true;
4125  		info->compl_ctx = compl_ctx;
4126  		break;
4127  	case IRDMA_AE_SOURCE_IN_RR_WR:
4128  	case IRDMA_AE_SOURCE_IN_RR_WR_1011:
4129  		info->qp = true;
4130  		info->compl_ctx = compl_ctx;
4131  		info->in_rdrsp_wr = true;
4132  		break;
4133  	case IRDMA_AE_SOURCE_OUT_RR:
4134  	case IRDMA_AE_SOURCE_OUT_RR_1111:
4135  		info->qp = true;
4136  		info->compl_ctx = compl_ctx;
4137  		info->out_rdrsp = true;
4138  		break;
4139  	case IRDMA_AE_SOURCE_RSVD:
4140  	default:
4141  		break;
4142  	}
4143  
4144  	IRDMA_RING_MOVE_TAIL(aeq->aeq_ring);
4145  	if (!IRDMA_RING_CURRENT_TAIL(aeq->aeq_ring))
4146  		aeq->polarity ^= 1;
4147  
4148  	return 0;
4149  }
4150  
4151  /**
4152   * irdma_sc_repost_aeq_entries - repost completed aeq entries
4153   * @dev: sc device struct
4154   * @count: allocate count
4155   */
irdma_sc_repost_aeq_entries(struct irdma_sc_dev * dev,u32 count)4156  void irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count)
4157  {
4158  	writel(count, dev->hw_regs[IRDMA_AEQALLOC]);
4159  }
4160  
4161  /**
4162   * irdma_sc_ccq_init - initialize control cq
4163   * @cq: sc's cq ctruct
4164   * @info: info for control cq initialization
4165   */
irdma_sc_ccq_init(struct irdma_sc_cq * cq,struct irdma_ccq_init_info * info)4166  int irdma_sc_ccq_init(struct irdma_sc_cq *cq, struct irdma_ccq_init_info *info)
4167  {
4168  	u32 pble_obj_cnt;
4169  
4170  	if (info->num_elem < info->dev->hw_attrs.uk_attrs.min_hw_cq_size ||
4171  	    info->num_elem > info->dev->hw_attrs.uk_attrs.max_hw_cq_size)
4172  		return -EINVAL;
4173  
4174  	if (info->ceq_id >= info->dev->hmc_fpm_misc.max_ceqs)
4175  		return -EINVAL;
4176  
4177  	pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
4178  
4179  	if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
4180  		return -EINVAL;
4181  
4182  	cq->cq_pa = info->cq_pa;
4183  	cq->cq_uk.cq_base = info->cq_base;
4184  	cq->shadow_area_pa = info->shadow_area_pa;
4185  	cq->cq_uk.shadow_area = info->shadow_area;
4186  	cq->shadow_read_threshold = info->shadow_read_threshold;
4187  	cq->dev = info->dev;
4188  	cq->ceq_id = info->ceq_id;
4189  	cq->cq_uk.cq_size = info->num_elem;
4190  	cq->cq_type = IRDMA_CQ_TYPE_CQP;
4191  	cq->ceqe_mask = info->ceqe_mask;
4192  	IRDMA_RING_INIT(cq->cq_uk.cq_ring, info->num_elem);
4193  	cq->cq_uk.cq_id = 0; /* control cq is id 0 always */
4194  	cq->ceq_id_valid = info->ceq_id_valid;
4195  	cq->tph_en = info->tph_en;
4196  	cq->tph_val = info->tph_val;
4197  	cq->cq_uk.avoid_mem_cflct = info->avoid_mem_cflct;
4198  	cq->pbl_list = info->pbl_list;
4199  	cq->virtual_map = info->virtual_map;
4200  	cq->pbl_chunk_size = info->pbl_chunk_size;
4201  	cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
4202  	cq->cq_uk.polarity = true;
4203  	cq->vsi = info->vsi;
4204  	cq->cq_uk.cq_ack_db = cq->dev->cq_ack_db;
4205  
4206  	/* Only applicable to CQs other than CCQ so initialize to zero */
4207  	cq->cq_uk.cqe_alloc_db = NULL;
4208  
4209  	info->dev->ccq = cq;
4210  	return 0;
4211  }
4212  
4213  /**
4214   * irdma_sc_ccq_create_done - poll cqp for ccq create
4215   * @ccq: ccq sc struct
4216   */
irdma_sc_ccq_create_done(struct irdma_sc_cq * ccq)4217  static inline int irdma_sc_ccq_create_done(struct irdma_sc_cq *ccq)
4218  {
4219  	struct irdma_sc_cqp *cqp;
4220  
4221  	cqp = ccq->dev->cqp;
4222  
4223  	return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_CREATE_CQ, NULL);
4224  }
4225  
4226  /**
4227   * irdma_sc_ccq_create - create control cq
4228   * @ccq: ccq sc struct
4229   * @scratch: u64 saved to be used during cqp completion
4230   * @check_overflow: overlow flag for ccq
4231   * @post_sq: flag for cqp db to ring
4232   */
irdma_sc_ccq_create(struct irdma_sc_cq * ccq,u64 scratch,bool check_overflow,bool post_sq)4233  int irdma_sc_ccq_create(struct irdma_sc_cq *ccq, u64 scratch,
4234  			bool check_overflow, bool post_sq)
4235  {
4236  	int ret_code;
4237  
4238  	ret_code = irdma_sc_cq_create(ccq, scratch, check_overflow, post_sq);
4239  	if (ret_code)
4240  		return ret_code;
4241  
4242  	if (post_sq) {
4243  		ret_code = irdma_sc_ccq_create_done(ccq);
4244  		if (ret_code)
4245  			return ret_code;
4246  	}
4247  	ccq->dev->cqp->process_cqp_sds = irdma_cqp_sds_cmd;
4248  
4249  	return 0;
4250  }
4251  
4252  /**
4253   * irdma_sc_ccq_destroy - destroy ccq during close
4254   * @ccq: ccq sc struct
4255   * @scratch: u64 saved to be used during cqp completion
4256   * @post_sq: flag for cqp db to ring
4257   */
irdma_sc_ccq_destroy(struct irdma_sc_cq * ccq,u64 scratch,bool post_sq)4258  int irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch, bool post_sq)
4259  {
4260  	struct irdma_sc_cqp *cqp;
4261  	__le64 *wqe;
4262  	u64 hdr;
4263  	int ret_code = 0;
4264  	u32 tail, val, error;
4265  
4266  	cqp = ccq->dev->cqp;
4267  	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
4268  	if (!wqe)
4269  		return -ENOMEM;
4270  
4271  	set_64bit_val(wqe, 0, ccq->cq_uk.cq_size);
4272  	set_64bit_val(wqe, 8, (uintptr_t)ccq >> 1);
4273  	set_64bit_val(wqe, 40, ccq->shadow_area_pa);
4274  
4275  	hdr = ccq->cq_uk.cq_id |
4276  	      FLD_LS_64(ccq->dev, (ccq->ceq_id_valid ? ccq->ceq_id : 0),
4277  			IRDMA_CQPSQ_CQ_CEQID) |
4278  	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_CQ) |
4279  	      FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, ccq->ceqe_mask) |
4280  	      FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, ccq->ceq_id_valid) |
4281  	      FIELD_PREP(IRDMA_CQPSQ_TPHEN, ccq->tph_en) |
4282  	      FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT, ccq->cq_uk.avoid_mem_cflct) |
4283  	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
4284  	dma_wmb(); /* make sure WQE is written before valid bit is set */
4285  
4286  	set_64bit_val(wqe, 24, hdr);
4287  
4288  	print_hex_dump_debug("WQE: CCQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16,
4289  			     8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
4290  	irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
4291  
4292  	if (post_sq) {
4293  		irdma_sc_cqp_post_sq(cqp);
4294  		ret_code = irdma_cqp_poll_registers(cqp, tail,
4295  						    cqp->dev->hw_attrs.max_done_count);
4296  	}
4297  
4298  	cqp->process_cqp_sds = irdma_update_sds_noccq;
4299  
4300  	return ret_code;
4301  }
4302  
4303  /**
4304   * irdma_sc_init_iw_hmc() - queries fpm values using cqp and populates hmc_info
4305   * @dev : ptr to irdma_dev struct
4306   * @hmc_fn_id: hmc function id
4307   */
irdma_sc_init_iw_hmc(struct irdma_sc_dev * dev,u8 hmc_fn_id)4308  int irdma_sc_init_iw_hmc(struct irdma_sc_dev *dev, u8 hmc_fn_id)
4309  {
4310  	struct irdma_hmc_info *hmc_info;
4311  	struct irdma_hmc_fpm_misc *hmc_fpm_misc;
4312  	struct irdma_dma_mem query_fpm_mem;
4313  	int ret_code = 0;
4314  	u8 wait_type;
4315  
4316  	hmc_info = dev->hmc_info;
4317  	hmc_fpm_misc = &dev->hmc_fpm_misc;
4318  	query_fpm_mem.pa = dev->fpm_query_buf_pa;
4319  	query_fpm_mem.va = dev->fpm_query_buf;
4320  	hmc_info->hmc_fn_id = hmc_fn_id;
4321  	wait_type = (u8)IRDMA_CQP_WAIT_POLL_REGS;
4322  
4323  	ret_code = irdma_sc_query_fpm_val(dev->cqp, 0, hmc_info->hmc_fn_id,
4324  					  &query_fpm_mem, true, wait_type);
4325  	if (ret_code)
4326  		return ret_code;
4327  
4328  	/* parse the fpm_query_buf and fill hmc obj info */
4329  	ret_code = irdma_sc_parse_fpm_query_buf(dev, query_fpm_mem.va, hmc_info,
4330  						hmc_fpm_misc);
4331  
4332  	print_hex_dump_debug("HMC: QUERY FPM BUFFER", DUMP_PREFIX_OFFSET, 16,
4333  			     8, query_fpm_mem.va, IRDMA_QUERY_FPM_BUF_SIZE,
4334  			     false);
4335  	return ret_code;
4336  }
4337  
4338  /**
4339   * irdma_sc_cfg_iw_fpm() - commits hmc obj cnt values using cqp
4340   * command and populates fpm base address in hmc_info
4341   * @dev : ptr to irdma_dev struct
4342   * @hmc_fn_id: hmc function id
4343   */
irdma_sc_cfg_iw_fpm(struct irdma_sc_dev * dev,u8 hmc_fn_id)4344  static int irdma_sc_cfg_iw_fpm(struct irdma_sc_dev *dev, u8 hmc_fn_id)
4345  {
4346  	struct irdma_hmc_info *hmc_info;
4347  	struct irdma_hmc_obj_info *obj_info;
4348  	__le64 *buf;
4349  	struct irdma_dma_mem commit_fpm_mem;
4350  	int ret_code = 0;
4351  	u8 wait_type;
4352  
4353  	hmc_info = dev->hmc_info;
4354  	obj_info = hmc_info->hmc_obj;
4355  	buf = dev->fpm_commit_buf;
4356  
4357  	set_64bit_val(buf, 0, (u64)obj_info[IRDMA_HMC_IW_QP].cnt);
4358  	set_64bit_val(buf, 8, (u64)obj_info[IRDMA_HMC_IW_CQ].cnt);
4359  	set_64bit_val(buf, 16, (u64)0); /* RSRVD */
4360  	set_64bit_val(buf, 24, (u64)obj_info[IRDMA_HMC_IW_HTE].cnt);
4361  	set_64bit_val(buf, 32, (u64)obj_info[IRDMA_HMC_IW_ARP].cnt);
4362  	set_64bit_val(buf, 40, (u64)0); /* RSVD */
4363  	set_64bit_val(buf, 48, (u64)obj_info[IRDMA_HMC_IW_MR].cnt);
4364  	set_64bit_val(buf, 56, (u64)obj_info[IRDMA_HMC_IW_XF].cnt);
4365  	set_64bit_val(buf, 64, (u64)obj_info[IRDMA_HMC_IW_XFFL].cnt);
4366  	set_64bit_val(buf, 72, (u64)obj_info[IRDMA_HMC_IW_Q1].cnt);
4367  	set_64bit_val(buf, 80, (u64)obj_info[IRDMA_HMC_IW_Q1FL].cnt);
4368  	set_64bit_val(buf, 88,
4369  		      (u64)obj_info[IRDMA_HMC_IW_TIMER].cnt);
4370  	set_64bit_val(buf, 96,
4371  		      (u64)obj_info[IRDMA_HMC_IW_FSIMC].cnt);
4372  	set_64bit_val(buf, 104,
4373  		      (u64)obj_info[IRDMA_HMC_IW_FSIAV].cnt);
4374  	set_64bit_val(buf, 112,
4375  		      (u64)obj_info[IRDMA_HMC_IW_PBLE].cnt);
4376  	set_64bit_val(buf, 120, (u64)0); /* RSVD */
4377  	set_64bit_val(buf, 128, (u64)obj_info[IRDMA_HMC_IW_RRF].cnt);
4378  	set_64bit_val(buf, 136,
4379  		      (u64)obj_info[IRDMA_HMC_IW_RRFFL].cnt);
4380  	set_64bit_val(buf, 144, (u64)obj_info[IRDMA_HMC_IW_HDR].cnt);
4381  	set_64bit_val(buf, 152, (u64)obj_info[IRDMA_HMC_IW_MD].cnt);
4382  	set_64bit_val(buf, 160,
4383  		      (u64)obj_info[IRDMA_HMC_IW_OOISC].cnt);
4384  	set_64bit_val(buf, 168,
4385  		      (u64)obj_info[IRDMA_HMC_IW_OOISCFFL].cnt);
4386  
4387  	commit_fpm_mem.pa = dev->fpm_commit_buf_pa;
4388  	commit_fpm_mem.va = dev->fpm_commit_buf;
4389  
4390  	wait_type = (u8)IRDMA_CQP_WAIT_POLL_REGS;
4391  	print_hex_dump_debug("HMC: COMMIT FPM BUFFER", DUMP_PREFIX_OFFSET, 16,
4392  			     8, commit_fpm_mem.va, IRDMA_COMMIT_FPM_BUF_SIZE,
4393  			     false);
4394  	ret_code = irdma_sc_commit_fpm_val(dev->cqp, 0, hmc_info->hmc_fn_id,
4395  					   &commit_fpm_mem, true, wait_type);
4396  	if (!ret_code)
4397  		irdma_sc_parse_fpm_commit_buf(dev, dev->fpm_commit_buf,
4398  					      hmc_info->hmc_obj,
4399  					      &hmc_info->sd_table.sd_cnt);
4400  	print_hex_dump_debug("HMC: COMMIT FPM BUFFER", DUMP_PREFIX_OFFSET, 16,
4401  			     8, commit_fpm_mem.va, IRDMA_COMMIT_FPM_BUF_SIZE,
4402  			     false);
4403  
4404  	return ret_code;
4405  }
4406  
4407  /**
4408   * cqp_sds_wqe_fill - fill cqp wqe doe sd
4409   * @cqp: struct for cqp hw
4410   * @info: sd info for wqe
4411   * @scratch: u64 saved to be used during cqp completion
4412   */
cqp_sds_wqe_fill(struct irdma_sc_cqp * cqp,struct irdma_update_sds_info * info,u64 scratch)4413  static int cqp_sds_wqe_fill(struct irdma_sc_cqp *cqp,
4414  			    struct irdma_update_sds_info *info, u64 scratch)
4415  {
4416  	u64 data;
4417  	u64 hdr;
4418  	__le64 *wqe;
4419  	int mem_entries, wqe_entries;
4420  	struct irdma_dma_mem *sdbuf = &cqp->sdbuf;
4421  	u64 offset = 0;
4422  	u32 wqe_idx;
4423  
4424  	wqe = irdma_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
4425  	if (!wqe)
4426  		return -ENOMEM;
4427  
4428  	wqe_entries = (info->cnt > 3) ? 3 : info->cnt;
4429  	mem_entries = info->cnt - wqe_entries;
4430  
4431  	if (mem_entries) {
4432  		offset = wqe_idx * IRDMA_UPDATE_SD_BUFF_SIZE;
4433  		memcpy(((char *)sdbuf->va + offset), &info->entry[3], mem_entries << 4);
4434  
4435  		data = (u64)sdbuf->pa + offset;
4436  	} else {
4437  		data = 0;
4438  	}
4439  	data |= FIELD_PREP(IRDMA_CQPSQ_UPESD_HMCFNID, info->hmc_fn_id);
4440  	set_64bit_val(wqe, 16, data);
4441  
4442  	switch (wqe_entries) {
4443  	case 3:
4444  		set_64bit_val(wqe, 48,
4445  			      (FIELD_PREP(IRDMA_CQPSQ_UPESD_SDCMD, info->entry[2].cmd) |
4446  			       FIELD_PREP(IRDMA_CQPSQ_UPESD_ENTRY_VALID, 1)));
4447  
4448  		set_64bit_val(wqe, 56, info->entry[2].data);
4449  		fallthrough;
4450  	case 2:
4451  		set_64bit_val(wqe, 32,
4452  			      (FIELD_PREP(IRDMA_CQPSQ_UPESD_SDCMD, info->entry[1].cmd) |
4453  			       FIELD_PREP(IRDMA_CQPSQ_UPESD_ENTRY_VALID, 1)));
4454  
4455  		set_64bit_val(wqe, 40, info->entry[1].data);
4456  		fallthrough;
4457  	case 1:
4458  		set_64bit_val(wqe, 0,
4459  			      FIELD_PREP(IRDMA_CQPSQ_UPESD_SDCMD, info->entry[0].cmd));
4460  
4461  		set_64bit_val(wqe, 8, info->entry[0].data);
4462  		break;
4463  	default:
4464  		break;
4465  	}
4466  
4467  	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_UPDATE_PE_SDS) |
4468  	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity) |
4469  	      FIELD_PREP(IRDMA_CQPSQ_UPESD_ENTRY_COUNT, mem_entries);
4470  	dma_wmb(); /* make sure WQE is written before valid bit is set */
4471  
4472  	set_64bit_val(wqe, 24, hdr);
4473  
4474  	if (mem_entries)
4475  		print_hex_dump_debug("WQE: UPDATE_PE_SDS WQE Buffer",
4476  				     DUMP_PREFIX_OFFSET, 16, 8,
4477  				     (char *)sdbuf->va + offset,
4478  				     mem_entries << 4, false);
4479  
4480  	print_hex_dump_debug("WQE: UPDATE_PE_SDS WQE", DUMP_PREFIX_OFFSET, 16,
4481  			     8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
4482  
4483  	return 0;
4484  }
4485  
4486  /**
4487   * irdma_update_pe_sds - cqp wqe for sd
4488   * @dev: ptr to irdma_dev struct
4489   * @info: sd info for sd's
4490   * @scratch: u64 saved to be used during cqp completion
4491   */
irdma_update_pe_sds(struct irdma_sc_dev * dev,struct irdma_update_sds_info * info,u64 scratch)4492  static int irdma_update_pe_sds(struct irdma_sc_dev *dev,
4493  			       struct irdma_update_sds_info *info, u64 scratch)
4494  {
4495  	struct irdma_sc_cqp *cqp = dev->cqp;
4496  	int ret_code;
4497  
4498  	ret_code = cqp_sds_wqe_fill(cqp, info, scratch);
4499  	if (!ret_code)
4500  		irdma_sc_cqp_post_sq(cqp);
4501  
4502  	return ret_code;
4503  }
4504  
4505  /**
4506   * irdma_update_sds_noccq - update sd before ccq created
4507   * @dev: sc device struct
4508   * @info: sd info for sd's
4509   */
irdma_update_sds_noccq(struct irdma_sc_dev * dev,struct irdma_update_sds_info * info)4510  int irdma_update_sds_noccq(struct irdma_sc_dev *dev,
4511  			   struct irdma_update_sds_info *info)
4512  {
4513  	u32 error, val, tail;
4514  	struct irdma_sc_cqp *cqp = dev->cqp;
4515  	int ret_code;
4516  
4517  	ret_code = cqp_sds_wqe_fill(cqp, info, 0);
4518  	if (ret_code)
4519  		return ret_code;
4520  
4521  	irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
4522  
4523  	irdma_sc_cqp_post_sq(cqp);
4524  	return irdma_cqp_poll_registers(cqp, tail,
4525  					cqp->dev->hw_attrs.max_done_count);
4526  }
4527  
4528  /**
4529   * irdma_sc_static_hmc_pages_allocated - cqp wqe to allocate hmc pages
4530   * @cqp: struct for cqp hw
4531   * @scratch: u64 saved to be used during cqp completion
4532   * @hmc_fn_id: hmc function id
4533   * @post_sq: flag for cqp db to ring
4534   * @poll_registers: flag to poll register for cqp completion
4535   */
irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp * cqp,u64 scratch,u8 hmc_fn_id,bool post_sq,bool poll_registers)4536  int irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
4537  					u8 hmc_fn_id, bool post_sq,
4538  					bool poll_registers)
4539  {
4540  	u64 hdr;
4541  	__le64 *wqe;
4542  	u32 tail, val, error;
4543  
4544  	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
4545  	if (!wqe)
4546  		return -ENOMEM;
4547  
4548  	set_64bit_val(wqe, 16,
4549  		      FIELD_PREP(IRDMA_SHMC_PAGE_ALLOCATED_HMC_FN_ID, hmc_fn_id));
4550  
4551  	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE,
4552  			 IRDMA_CQP_OP_SHMC_PAGES_ALLOCATED) |
4553  	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
4554  	dma_wmb(); /* make sure WQE is written before valid bit is set */
4555  
4556  	set_64bit_val(wqe, 24, hdr);
4557  
4558  	print_hex_dump_debug("WQE: SHMC_PAGES_ALLOCATED WQE",
4559  			     DUMP_PREFIX_OFFSET, 16, 8, wqe,
4560  			     IRDMA_CQP_WQE_SIZE * 8, false);
4561  	irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
4562  
4563  	if (post_sq) {
4564  		irdma_sc_cqp_post_sq(cqp);
4565  		if (poll_registers)
4566  			/* check for cqp sq tail update */
4567  			return irdma_cqp_poll_registers(cqp, tail,
4568  							cqp->dev->hw_attrs.max_done_count);
4569  		else
4570  			return irdma_sc_poll_for_cqp_op_done(cqp,
4571  							     IRDMA_CQP_OP_SHMC_PAGES_ALLOCATED,
4572  							     NULL);
4573  	}
4574  
4575  	return 0;
4576  }
4577  
4578  /**
4579   * irdma_cqp_ring_full - check if cqp ring is full
4580   * @cqp: struct for cqp hw
4581   */
irdma_cqp_ring_full(struct irdma_sc_cqp * cqp)4582  static bool irdma_cqp_ring_full(struct irdma_sc_cqp *cqp)
4583  {
4584  	return IRDMA_RING_FULL_ERR(cqp->sq_ring);
4585  }
4586  
4587  /**
4588   * irdma_est_sd - returns approximate number of SDs for HMC
4589   * @dev: sc device struct
4590   * @hmc_info: hmc structure, size and count for HMC objects
4591   */
irdma_est_sd(struct irdma_sc_dev * dev,struct irdma_hmc_info * hmc_info)4592  static u32 irdma_est_sd(struct irdma_sc_dev *dev,
4593  			struct irdma_hmc_info *hmc_info)
4594  {
4595  	int i;
4596  	u64 size = 0;
4597  	u64 sd;
4598  
4599  	for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++)
4600  		if (i != IRDMA_HMC_IW_PBLE)
4601  			size += round_up(hmc_info->hmc_obj[i].cnt *
4602  					 hmc_info->hmc_obj[i].size, 512);
4603  	size += round_up(hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt *
4604  			 hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].size, 512);
4605  	if (size & 0x1FFFFF)
4606  		sd = (size >> 21) + 1; /* add 1 for remainder */
4607  	else
4608  		sd = size >> 21;
4609  	if (sd > 0xFFFFFFFF) {
4610  		ibdev_dbg(to_ibdev(dev), "HMC: sd overflow[%lld]\n", sd);
4611  		sd = 0xFFFFFFFF - 1;
4612  	}
4613  
4614  	return (u32)sd;
4615  }
4616  
4617  /**
4618   * irdma_sc_query_rdma_features_done - poll cqp for query features done
4619   * @cqp: struct for cqp hw
4620   */
irdma_sc_query_rdma_features_done(struct irdma_sc_cqp * cqp)4621  static int irdma_sc_query_rdma_features_done(struct irdma_sc_cqp *cqp)
4622  {
4623  	return irdma_sc_poll_for_cqp_op_done(cqp,
4624  					     IRDMA_CQP_OP_QUERY_RDMA_FEATURES,
4625  					     NULL);
4626  }
4627  
4628  /**
4629   * irdma_sc_query_rdma_features - query RDMA features and FW ver
4630   * @cqp: struct for cqp hw
4631   * @buf: buffer to hold query info
4632   * @scratch: u64 saved to be used during cqp completion
4633   */
irdma_sc_query_rdma_features(struct irdma_sc_cqp * cqp,struct irdma_dma_mem * buf,u64 scratch)4634  static int irdma_sc_query_rdma_features(struct irdma_sc_cqp *cqp,
4635  					struct irdma_dma_mem *buf, u64 scratch)
4636  {
4637  	__le64 *wqe;
4638  	u64 temp;
4639  
4640  	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
4641  	if (!wqe)
4642  		return -ENOMEM;
4643  
4644  	temp = buf->pa;
4645  	set_64bit_val(wqe, 32, temp);
4646  
4647  	temp = FIELD_PREP(IRDMA_CQPSQ_QUERY_RDMA_FEATURES_WQEVALID,
4648  			  cqp->polarity) |
4649  	       FIELD_PREP(IRDMA_CQPSQ_QUERY_RDMA_FEATURES_BUF_LEN, buf->size) |
4650  	       FIELD_PREP(IRDMA_CQPSQ_UP_OP, IRDMA_CQP_OP_QUERY_RDMA_FEATURES);
4651  	dma_wmb(); /* make sure WQE is written before valid bit is set */
4652  
4653  	set_64bit_val(wqe, 24, temp);
4654  
4655  	print_hex_dump_debug("WQE: QUERY RDMA FEATURES", DUMP_PREFIX_OFFSET,
4656  			     16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
4657  	irdma_sc_cqp_post_sq(cqp);
4658  
4659  	return 0;
4660  }
4661  
4662  /**
4663   * irdma_get_rdma_features - get RDMA features
4664   * @dev: sc device struct
4665   */
irdma_get_rdma_features(struct irdma_sc_dev * dev)4666  int irdma_get_rdma_features(struct irdma_sc_dev *dev)
4667  {
4668  	int ret_code;
4669  	struct irdma_dma_mem feat_buf;
4670  	u64 temp;
4671  	u16 byte_idx, feat_type, feat_cnt, feat_idx;
4672  
4673  	feat_buf.size = ALIGN(IRDMA_FEATURE_BUF_SIZE,
4674  			      IRDMA_FEATURE_BUF_ALIGNMENT);
4675  	feat_buf.va = dma_alloc_coherent(dev->hw->device, feat_buf.size,
4676  					 &feat_buf.pa, GFP_KERNEL);
4677  	if (!feat_buf.va)
4678  		return -ENOMEM;
4679  
4680  	ret_code = irdma_sc_query_rdma_features(dev->cqp, &feat_buf, 0);
4681  	if (!ret_code)
4682  		ret_code = irdma_sc_query_rdma_features_done(dev->cqp);
4683  	if (ret_code)
4684  		goto exit;
4685  
4686  	get_64bit_val(feat_buf.va, 0, &temp);
4687  	feat_cnt = (u16)FIELD_GET(IRDMA_FEATURE_CNT, temp);
4688  	if (feat_cnt < 2) {
4689  		ret_code = -EINVAL;
4690  		goto exit;
4691  	} else if (feat_cnt > IRDMA_MAX_FEATURES) {
4692  		ibdev_dbg(to_ibdev(dev),
4693  			  "DEV: feature buf size insufficient, retrying with larger buffer\n");
4694  		dma_free_coherent(dev->hw->device, feat_buf.size, feat_buf.va,
4695  				  feat_buf.pa);
4696  		feat_buf.va = NULL;
4697  		feat_buf.size = ALIGN(8 * feat_cnt,
4698  				      IRDMA_FEATURE_BUF_ALIGNMENT);
4699  		feat_buf.va = dma_alloc_coherent(dev->hw->device,
4700  						 feat_buf.size, &feat_buf.pa,
4701  						 GFP_KERNEL);
4702  		if (!feat_buf.va)
4703  			return -ENOMEM;
4704  
4705  		ret_code = irdma_sc_query_rdma_features(dev->cqp, &feat_buf, 0);
4706  		if (!ret_code)
4707  			ret_code = irdma_sc_query_rdma_features_done(dev->cqp);
4708  		if (ret_code)
4709  			goto exit;
4710  
4711  		get_64bit_val(feat_buf.va, 0, &temp);
4712  		feat_cnt = (u16)FIELD_GET(IRDMA_FEATURE_CNT, temp);
4713  		if (feat_cnt < 2) {
4714  			ret_code = -EINVAL;
4715  			goto exit;
4716  		}
4717  	}
4718  
4719  	print_hex_dump_debug("WQE: QUERY RDMA FEATURES", DUMP_PREFIX_OFFSET,
4720  			     16, 8, feat_buf.va, feat_cnt * 8, false);
4721  
4722  	for (byte_idx = 0, feat_idx = 0; feat_idx < min(feat_cnt, (u16)IRDMA_MAX_FEATURES);
4723  	     feat_idx++, byte_idx += 8) {
4724  		get_64bit_val(feat_buf.va, byte_idx, &temp);
4725  		feat_type = FIELD_GET(IRDMA_FEATURE_TYPE, temp);
4726  		if (feat_type >= IRDMA_MAX_FEATURES) {
4727  			ibdev_dbg(to_ibdev(dev),
4728  				  "DEV: found unrecognized feature type %d\n",
4729  				  feat_type);
4730  			continue;
4731  		}
4732  		dev->feature_info[feat_type] = temp;
4733  	}
4734  exit:
4735  	dma_free_coherent(dev->hw->device, feat_buf.size, feat_buf.va,
4736  			  feat_buf.pa);
4737  	feat_buf.va = NULL;
4738  	return ret_code;
4739  }
4740  
irdma_q1_cnt(struct irdma_sc_dev * dev,struct irdma_hmc_info * hmc_info,u32 qpwanted)4741  static u32 irdma_q1_cnt(struct irdma_sc_dev *dev,
4742  			struct irdma_hmc_info *hmc_info, u32 qpwanted)
4743  {
4744  	u32 q1_cnt;
4745  
4746  	if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) {
4747  		q1_cnt = roundup_pow_of_two(dev->hw_attrs.max_hw_ird * 2 * qpwanted);
4748  	} else {
4749  		if (dev->cqp->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
4750  			q1_cnt = roundup_pow_of_two(dev->hw_attrs.max_hw_ird * 2 * qpwanted + 512);
4751  		else
4752  			q1_cnt = dev->hw_attrs.max_hw_ird * 2 * qpwanted;
4753  	}
4754  
4755  	return q1_cnt;
4756  }
4757  
cfg_fpm_value_gen_1(struct irdma_sc_dev * dev,struct irdma_hmc_info * hmc_info,u32 qpwanted)4758  static void cfg_fpm_value_gen_1(struct irdma_sc_dev *dev,
4759  				struct irdma_hmc_info *hmc_info, u32 qpwanted)
4760  {
4761  	hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt = roundup_pow_of_two(qpwanted * dev->hw_attrs.max_hw_wqes);
4762  }
4763  
cfg_fpm_value_gen_2(struct irdma_sc_dev * dev,struct irdma_hmc_info * hmc_info,u32 qpwanted)4764  static void cfg_fpm_value_gen_2(struct irdma_sc_dev *dev,
4765  				struct irdma_hmc_info *hmc_info, u32 qpwanted)
4766  {
4767  	struct irdma_hmc_fpm_misc *hmc_fpm_misc = &dev->hmc_fpm_misc;
4768  
4769  	hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt =
4770  		4 * hmc_fpm_misc->xf_block_size * qpwanted;
4771  
4772  	hmc_info->hmc_obj[IRDMA_HMC_IW_HDR].cnt = qpwanted;
4773  
4774  	if (hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].max_cnt)
4775  		hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].cnt = 32 * qpwanted;
4776  	if (hmc_info->hmc_obj[IRDMA_HMC_IW_RRFFL].max_cnt)
4777  		hmc_info->hmc_obj[IRDMA_HMC_IW_RRFFL].cnt =
4778  			hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].cnt /
4779  			hmc_fpm_misc->rrf_block_size;
4780  	if (hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].max_cnt)
4781  		hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].cnt = 32 * qpwanted;
4782  	if (hmc_info->hmc_obj[IRDMA_HMC_IW_OOISCFFL].max_cnt)
4783  		hmc_info->hmc_obj[IRDMA_HMC_IW_OOISCFFL].cnt =
4784  			hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].cnt /
4785  			hmc_fpm_misc->ooiscf_block_size;
4786  }
4787  
4788  /**
4789   * irdma_cfg_fpm_val - configure HMC objects
4790   * @dev: sc device struct
4791   * @qp_count: desired qp count
4792   */
irdma_cfg_fpm_val(struct irdma_sc_dev * dev,u32 qp_count)4793  int irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
4794  {
4795  	struct irdma_virt_mem virt_mem;
4796  	u32 i, mem_size;
4797  	u32 qpwanted, mrwanted, pblewanted;
4798  	u32 powerof2, hte;
4799  	u32 sd_needed;
4800  	u32 sd_diff;
4801  	u32 loop_count = 0;
4802  	struct irdma_hmc_info *hmc_info;
4803  	struct irdma_hmc_fpm_misc *hmc_fpm_misc;
4804  	int ret_code = 0;
4805  
4806  	hmc_info = dev->hmc_info;
4807  	hmc_fpm_misc = &dev->hmc_fpm_misc;
4808  
4809  	ret_code = irdma_sc_init_iw_hmc(dev, dev->hmc_fn_id);
4810  	if (ret_code) {
4811  		ibdev_dbg(to_ibdev(dev),
4812  			  "HMC: irdma_sc_init_iw_hmc returned error_code = %d\n",
4813  			  ret_code);
4814  		return ret_code;
4815  	}
4816  
4817  	for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++)
4818  		hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;
4819  	sd_needed = irdma_est_sd(dev, hmc_info);
4820  	ibdev_dbg(to_ibdev(dev),
4821  		  "HMC: FW max resources sd_needed[%08d] first_sd_index[%04d]\n",
4822  		  sd_needed, hmc_info->first_sd_index);
4823  	ibdev_dbg(to_ibdev(dev), "HMC: sd count %d where max sd is %d\n",
4824  		  hmc_info->sd_table.sd_cnt, hmc_fpm_misc->max_sds);
4825  
4826  	qpwanted = min(qp_count, hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt);
4827  
4828  	powerof2 = 1;
4829  	while (powerof2 <= qpwanted)
4830  		powerof2 *= 2;
4831  	powerof2 /= 2;
4832  	qpwanted = powerof2;
4833  
4834  	mrwanted = hmc_info->hmc_obj[IRDMA_HMC_IW_MR].max_cnt;
4835  	pblewanted = hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].max_cnt;
4836  
4837  	ibdev_dbg(to_ibdev(dev),
4838  		  "HMC: req_qp=%d max_sd=%d, max_qp = %d, max_cq=%d, max_mr=%d, max_pble=%d, mc=%d, av=%d\n",
4839  		  qp_count, hmc_fpm_misc->max_sds,
4840  		  hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt,
4841  		  hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt,
4842  		  hmc_info->hmc_obj[IRDMA_HMC_IW_MR].max_cnt,
4843  		  hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].max_cnt,
4844  		  hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].max_cnt,
4845  		  hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt);
4846  	hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt =
4847  		hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].max_cnt;
4848  	hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt =
4849  		hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt;
4850  	hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].cnt =
4851  		hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].max_cnt;
4852  
4853  	hmc_info->hmc_obj[IRDMA_HMC_IW_APBVT_ENTRY].cnt = 1;
4854  
4855  	while (irdma_q1_cnt(dev, hmc_info, qpwanted) > hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].max_cnt)
4856  		qpwanted /= 2;
4857  
4858  	do {
4859  		++loop_count;
4860  		hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt = qpwanted;
4861  		hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt =
4862  			min(2 * qpwanted, hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt);
4863  		hmc_info->hmc_obj[IRDMA_HMC_IW_RESERVED].cnt = 0; /* Reserved */
4864  		hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt = mrwanted;
4865  
4866  		hte = round_up(qpwanted + hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt, 512);
4867  		powerof2 = 1;
4868  		while (powerof2 < hte)
4869  			powerof2 *= 2;
4870  		hmc_info->hmc_obj[IRDMA_HMC_IW_HTE].cnt =
4871  			powerof2 * hmc_fpm_misc->ht_multiplier;
4872  		if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
4873  			cfg_fpm_value_gen_1(dev, hmc_info, qpwanted);
4874  		else
4875  			cfg_fpm_value_gen_2(dev, hmc_info, qpwanted);
4876  
4877  		hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].cnt = irdma_q1_cnt(dev, hmc_info, qpwanted);
4878  		hmc_info->hmc_obj[IRDMA_HMC_IW_XFFL].cnt =
4879  			hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt / hmc_fpm_misc->xf_block_size;
4880  		hmc_info->hmc_obj[IRDMA_HMC_IW_Q1FL].cnt =
4881  			hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].cnt / hmc_fpm_misc->q1_block_size;
4882  		hmc_info->hmc_obj[IRDMA_HMC_IW_TIMER].cnt =
4883  			(round_up(qpwanted, 512) / 512 + 1) * hmc_fpm_misc->timer_bucket;
4884  
4885  		hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt = pblewanted;
4886  		sd_needed = irdma_est_sd(dev, hmc_info);
4887  		ibdev_dbg(to_ibdev(dev),
4888  			  "HMC: sd_needed = %d, hmc_fpm_misc->max_sds=%d, mrwanted=%d, pblewanted=%d qpwanted=%d\n",
4889  			  sd_needed, hmc_fpm_misc->max_sds, mrwanted,
4890  			  pblewanted, qpwanted);
4891  
4892  		/* Do not reduce resources further. All objects fit with max SDs */
4893  		if (sd_needed <= hmc_fpm_misc->max_sds)
4894  			break;
4895  
4896  		sd_diff = sd_needed - hmc_fpm_misc->max_sds;
4897  		if (sd_diff > 128) {
4898  			if (!(loop_count % 2) && qpwanted > 128) {
4899  				qpwanted /= 2;
4900  			} else {
4901  				mrwanted /= 2;
4902  				pblewanted /= 2;
4903  			}
4904  			continue;
4905  		}
4906  		if (dev->cqp->hmc_profile != IRDMA_HMC_PROFILE_FAVOR_VF &&
4907  		    pblewanted > (512 * FPM_MULTIPLIER * sd_diff)) {
4908  			pblewanted -= 256 * FPM_MULTIPLIER * sd_diff;
4909  			continue;
4910  		} else if (pblewanted > (100 * FPM_MULTIPLIER)) {
4911  			pblewanted -= 10 * FPM_MULTIPLIER;
4912  		} else if (pblewanted > FPM_MULTIPLIER) {
4913  			pblewanted -= FPM_MULTIPLIER;
4914  		} else if (qpwanted <= 128) {
4915  			if (hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt > 256)
4916  				hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt /= 2;
4917  			if (hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt > 256)
4918  				hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt /= 2;
4919  		}
4920  		if (mrwanted > FPM_MULTIPLIER)
4921  			mrwanted -= FPM_MULTIPLIER;
4922  		if (!(loop_count % 10) && qpwanted > 128) {
4923  			qpwanted /= 2;
4924  			if (hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt > 256)
4925  				hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt /= 2;
4926  		}
4927  	} while (loop_count < 2000);
4928  
4929  	if (sd_needed > hmc_fpm_misc->max_sds) {
4930  		ibdev_dbg(to_ibdev(dev),
4931  			  "HMC: cfg_fpm failed loop_cnt=%d, sd_needed=%d, max sd count %d\n",
4932  			  loop_count, sd_needed, hmc_info->sd_table.sd_cnt);
4933  		return -EINVAL;
4934  	}
4935  
4936  	if (loop_count > 1 && sd_needed < hmc_fpm_misc->max_sds) {
4937  		pblewanted += (hmc_fpm_misc->max_sds - sd_needed) * 256 *
4938  			      FPM_MULTIPLIER;
4939  		hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt = pblewanted;
4940  		sd_needed = irdma_est_sd(dev, hmc_info);
4941  	}
4942  
4943  	ibdev_dbg(to_ibdev(dev),
4944  		  "HMC: loop_cnt=%d, sd_needed=%d, qpcnt = %d, cqcnt=%d, mrcnt=%d, pblecnt=%d, mc=%d, ah=%d, max sd count %d, first sd index %d\n",
4945  		  loop_count, sd_needed,
4946  		  hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt,
4947  		  hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt,
4948  		  hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt,
4949  		  hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt,
4950  		  hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt,
4951  		  hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt,
4952  		  hmc_info->sd_table.sd_cnt, hmc_info->first_sd_index);
4953  
4954  	ret_code = irdma_sc_cfg_iw_fpm(dev, dev->hmc_fn_id);
4955  	if (ret_code) {
4956  		ibdev_dbg(to_ibdev(dev),
4957  			  "HMC: cfg_iw_fpm returned error_code[x%08X]\n",
4958  			  readl(dev->hw_regs[IRDMA_CQPERRCODES]));
4959  		return ret_code;
4960  	}
4961  
4962  	mem_size = sizeof(struct irdma_hmc_sd_entry) *
4963  		   (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index + 1);
4964  	virt_mem.size = mem_size;
4965  	virt_mem.va = kzalloc(virt_mem.size, GFP_KERNEL);
4966  	if (!virt_mem.va) {
4967  		ibdev_dbg(to_ibdev(dev),
4968  			  "HMC: failed to allocate memory for sd_entry buffer\n");
4969  		return -ENOMEM;
4970  	}
4971  	hmc_info->sd_table.sd_entry = virt_mem.va;
4972  
4973  	return ret_code;
4974  }
4975  
4976  /**
4977   * irdma_exec_cqp_cmd - execute cqp cmd when wqe are available
4978   * @dev: rdma device
4979   * @pcmdinfo: cqp command info
4980   */
irdma_exec_cqp_cmd(struct irdma_sc_dev * dev,struct cqp_cmds_info * pcmdinfo)4981  static int irdma_exec_cqp_cmd(struct irdma_sc_dev *dev,
4982  			      struct cqp_cmds_info *pcmdinfo)
4983  {
4984  	int status;
4985  	struct irdma_dma_mem val_mem;
4986  	bool alloc = false;
4987  
4988  	dev->cqp_cmd_stats[pcmdinfo->cqp_cmd]++;
4989  	switch (pcmdinfo->cqp_cmd) {
4990  	case IRDMA_OP_CEQ_DESTROY:
4991  		status = irdma_sc_ceq_destroy(pcmdinfo->in.u.ceq_destroy.ceq,
4992  					      pcmdinfo->in.u.ceq_destroy.scratch,
4993  					      pcmdinfo->post_sq);
4994  		break;
4995  	case IRDMA_OP_AEQ_DESTROY:
4996  		status = irdma_sc_aeq_destroy(pcmdinfo->in.u.aeq_destroy.aeq,
4997  					      pcmdinfo->in.u.aeq_destroy.scratch,
4998  					      pcmdinfo->post_sq);
4999  
5000  		break;
5001  	case IRDMA_OP_CEQ_CREATE:
5002  		status = irdma_sc_ceq_create(pcmdinfo->in.u.ceq_create.ceq,
5003  					     pcmdinfo->in.u.ceq_create.scratch,
5004  					     pcmdinfo->post_sq);
5005  		break;
5006  	case IRDMA_OP_AEQ_CREATE:
5007  		status = irdma_sc_aeq_create(pcmdinfo->in.u.aeq_create.aeq,
5008  					     pcmdinfo->in.u.aeq_create.scratch,
5009  					     pcmdinfo->post_sq);
5010  		break;
5011  	case IRDMA_OP_QP_UPLOAD_CONTEXT:
5012  		status = irdma_sc_qp_upload_context(pcmdinfo->in.u.qp_upload_context.dev,
5013  						    &pcmdinfo->in.u.qp_upload_context.info,
5014  						    pcmdinfo->in.u.qp_upload_context.scratch,
5015  						    pcmdinfo->post_sq);
5016  		break;
5017  	case IRDMA_OP_CQ_CREATE:
5018  		status = irdma_sc_cq_create(pcmdinfo->in.u.cq_create.cq,
5019  					    pcmdinfo->in.u.cq_create.scratch,
5020  					    pcmdinfo->in.u.cq_create.check_overflow,
5021  					    pcmdinfo->post_sq);
5022  		break;
5023  	case IRDMA_OP_CQ_MODIFY:
5024  		status = irdma_sc_cq_modify(pcmdinfo->in.u.cq_modify.cq,
5025  					    &pcmdinfo->in.u.cq_modify.info,
5026  					    pcmdinfo->in.u.cq_modify.scratch,
5027  					    pcmdinfo->post_sq);
5028  		break;
5029  	case IRDMA_OP_CQ_DESTROY:
5030  		status = irdma_sc_cq_destroy(pcmdinfo->in.u.cq_destroy.cq,
5031  					     pcmdinfo->in.u.cq_destroy.scratch,
5032  					     pcmdinfo->post_sq);
5033  		break;
5034  	case IRDMA_OP_QP_FLUSH_WQES:
5035  		status = irdma_sc_qp_flush_wqes(pcmdinfo->in.u.qp_flush_wqes.qp,
5036  						&pcmdinfo->in.u.qp_flush_wqes.info,
5037  						pcmdinfo->in.u.qp_flush_wqes.scratch,
5038  						pcmdinfo->post_sq);
5039  		break;
5040  	case IRDMA_OP_GEN_AE:
5041  		status = irdma_sc_gen_ae(pcmdinfo->in.u.gen_ae.qp,
5042  					 &pcmdinfo->in.u.gen_ae.info,
5043  					 pcmdinfo->in.u.gen_ae.scratch,
5044  					 pcmdinfo->post_sq);
5045  		break;
5046  	case IRDMA_OP_MANAGE_PUSH_PAGE:
5047  		status = irdma_sc_manage_push_page(pcmdinfo->in.u.manage_push_page.cqp,
5048  						   &pcmdinfo->in.u.manage_push_page.info,
5049  						   pcmdinfo->in.u.manage_push_page.scratch,
5050  						   pcmdinfo->post_sq);
5051  		break;
5052  	case IRDMA_OP_UPDATE_PE_SDS:
5053  		status = irdma_update_pe_sds(pcmdinfo->in.u.update_pe_sds.dev,
5054  					     &pcmdinfo->in.u.update_pe_sds.info,
5055  					     pcmdinfo->in.u.update_pe_sds.scratch);
5056  		break;
5057  	case IRDMA_OP_MANAGE_HMC_PM_FUNC_TABLE:
5058  		/* switch to calling through the call table */
5059  		status =
5060  			irdma_sc_manage_hmc_pm_func_table(pcmdinfo->in.u.manage_hmc_pm.dev->cqp,
5061  							  &pcmdinfo->in.u.manage_hmc_pm.info,
5062  							  pcmdinfo->in.u.manage_hmc_pm.scratch,
5063  							  true);
5064  		break;
5065  	case IRDMA_OP_SUSPEND:
5066  		status = irdma_sc_suspend_qp(pcmdinfo->in.u.suspend_resume.cqp,
5067  					     pcmdinfo->in.u.suspend_resume.qp,
5068  					     pcmdinfo->in.u.suspend_resume.scratch);
5069  		break;
5070  	case IRDMA_OP_RESUME:
5071  		status = irdma_sc_resume_qp(pcmdinfo->in.u.suspend_resume.cqp,
5072  					    pcmdinfo->in.u.suspend_resume.qp,
5073  					    pcmdinfo->in.u.suspend_resume.scratch);
5074  		break;
5075  	case IRDMA_OP_QUERY_FPM_VAL:
5076  		val_mem.pa = pcmdinfo->in.u.query_fpm_val.fpm_val_pa;
5077  		val_mem.va = pcmdinfo->in.u.query_fpm_val.fpm_val_va;
5078  		status = irdma_sc_query_fpm_val(pcmdinfo->in.u.query_fpm_val.cqp,
5079  						pcmdinfo->in.u.query_fpm_val.scratch,
5080  						pcmdinfo->in.u.query_fpm_val.hmc_fn_id,
5081  						&val_mem, true, IRDMA_CQP_WAIT_EVENT);
5082  		break;
5083  	case IRDMA_OP_COMMIT_FPM_VAL:
5084  		val_mem.pa = pcmdinfo->in.u.commit_fpm_val.fpm_val_pa;
5085  		val_mem.va = pcmdinfo->in.u.commit_fpm_val.fpm_val_va;
5086  		status = irdma_sc_commit_fpm_val(pcmdinfo->in.u.commit_fpm_val.cqp,
5087  						 pcmdinfo->in.u.commit_fpm_val.scratch,
5088  						 pcmdinfo->in.u.commit_fpm_val.hmc_fn_id,
5089  						 &val_mem,
5090  						 true,
5091  						 IRDMA_CQP_WAIT_EVENT);
5092  		break;
5093  	case IRDMA_OP_STATS_ALLOCATE:
5094  		alloc = true;
5095  		fallthrough;
5096  	case IRDMA_OP_STATS_FREE:
5097  		status = irdma_sc_manage_stats_inst(pcmdinfo->in.u.stats_manage.cqp,
5098  						    &pcmdinfo->in.u.stats_manage.info,
5099  						    alloc,
5100  						    pcmdinfo->in.u.stats_manage.scratch);
5101  		break;
5102  	case IRDMA_OP_STATS_GATHER:
5103  		status = irdma_sc_gather_stats(pcmdinfo->in.u.stats_gather.cqp,
5104  					       &pcmdinfo->in.u.stats_gather.info,
5105  					       pcmdinfo->in.u.stats_gather.scratch);
5106  		break;
5107  	case IRDMA_OP_WS_MODIFY_NODE:
5108  		status = irdma_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp,
5109  						 &pcmdinfo->in.u.ws_node.info,
5110  						 IRDMA_MODIFY_NODE,
5111  						 pcmdinfo->in.u.ws_node.scratch);
5112  		break;
5113  	case IRDMA_OP_WS_DELETE_NODE:
5114  		status = irdma_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp,
5115  						 &pcmdinfo->in.u.ws_node.info,
5116  						 IRDMA_DEL_NODE,
5117  						 pcmdinfo->in.u.ws_node.scratch);
5118  		break;
5119  	case IRDMA_OP_WS_ADD_NODE:
5120  		status = irdma_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp,
5121  						 &pcmdinfo->in.u.ws_node.info,
5122  						 IRDMA_ADD_NODE,
5123  						 pcmdinfo->in.u.ws_node.scratch);
5124  		break;
5125  	case IRDMA_OP_SET_UP_MAP:
5126  		status = irdma_sc_set_up_map(pcmdinfo->in.u.up_map.cqp,
5127  					     &pcmdinfo->in.u.up_map.info,
5128  					     pcmdinfo->in.u.up_map.scratch);
5129  		break;
5130  	case IRDMA_OP_QUERY_RDMA_FEATURES:
5131  		status = irdma_sc_query_rdma_features(pcmdinfo->in.u.query_rdma.cqp,
5132  						      &pcmdinfo->in.u.query_rdma.query_buff_mem,
5133  						      pcmdinfo->in.u.query_rdma.scratch);
5134  		break;
5135  	case IRDMA_OP_DELETE_ARP_CACHE_ENTRY:
5136  		status = irdma_sc_del_arp_cache_entry(pcmdinfo->in.u.del_arp_cache_entry.cqp,
5137  						      pcmdinfo->in.u.del_arp_cache_entry.scratch,
5138  						      pcmdinfo->in.u.del_arp_cache_entry.arp_index,
5139  						      pcmdinfo->post_sq);
5140  		break;
5141  	case IRDMA_OP_MANAGE_APBVT_ENTRY:
5142  		status = irdma_sc_manage_apbvt_entry(pcmdinfo->in.u.manage_apbvt_entry.cqp,
5143  						     &pcmdinfo->in.u.manage_apbvt_entry.info,
5144  						     pcmdinfo->in.u.manage_apbvt_entry.scratch,
5145  						     pcmdinfo->post_sq);
5146  		break;
5147  	case IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY:
5148  		status = irdma_sc_manage_qhash_table_entry(pcmdinfo->in.u.manage_qhash_table_entry.cqp,
5149  							   &pcmdinfo->in.u.manage_qhash_table_entry.info,
5150  							   pcmdinfo->in.u.manage_qhash_table_entry.scratch,
5151  							   pcmdinfo->post_sq);
5152  		break;
5153  	case IRDMA_OP_QP_MODIFY:
5154  		status = irdma_sc_qp_modify(pcmdinfo->in.u.qp_modify.qp,
5155  					    &pcmdinfo->in.u.qp_modify.info,
5156  					    pcmdinfo->in.u.qp_modify.scratch,
5157  					    pcmdinfo->post_sq);
5158  		break;
5159  	case IRDMA_OP_QP_CREATE:
5160  		status = irdma_sc_qp_create(pcmdinfo->in.u.qp_create.qp,
5161  					    &pcmdinfo->in.u.qp_create.info,
5162  					    pcmdinfo->in.u.qp_create.scratch,
5163  					    pcmdinfo->post_sq);
5164  		break;
5165  	case IRDMA_OP_QP_DESTROY:
5166  		status = irdma_sc_qp_destroy(pcmdinfo->in.u.qp_destroy.qp,
5167  					     pcmdinfo->in.u.qp_destroy.scratch,
5168  					     pcmdinfo->in.u.qp_destroy.remove_hash_idx,
5169  					     pcmdinfo->in.u.qp_destroy.ignore_mw_bnd,
5170  					     pcmdinfo->post_sq);
5171  		break;
5172  	case IRDMA_OP_ALLOC_STAG:
5173  		status = irdma_sc_alloc_stag(pcmdinfo->in.u.alloc_stag.dev,
5174  					     &pcmdinfo->in.u.alloc_stag.info,
5175  					     pcmdinfo->in.u.alloc_stag.scratch,
5176  					     pcmdinfo->post_sq);
5177  		break;
5178  	case IRDMA_OP_MR_REG_NON_SHARED:
5179  		status = irdma_sc_mr_reg_non_shared(pcmdinfo->in.u.mr_reg_non_shared.dev,
5180  						    &pcmdinfo->in.u.mr_reg_non_shared.info,
5181  						    pcmdinfo->in.u.mr_reg_non_shared.scratch,
5182  						    pcmdinfo->post_sq);
5183  		break;
5184  	case IRDMA_OP_DEALLOC_STAG:
5185  		status = irdma_sc_dealloc_stag(pcmdinfo->in.u.dealloc_stag.dev,
5186  					       &pcmdinfo->in.u.dealloc_stag.info,
5187  					       pcmdinfo->in.u.dealloc_stag.scratch,
5188  					       pcmdinfo->post_sq);
5189  		break;
5190  	case IRDMA_OP_MW_ALLOC:
5191  		status = irdma_sc_mw_alloc(pcmdinfo->in.u.mw_alloc.dev,
5192  					   &pcmdinfo->in.u.mw_alloc.info,
5193  					   pcmdinfo->in.u.mw_alloc.scratch,
5194  					   pcmdinfo->post_sq);
5195  		break;
5196  	case IRDMA_OP_ADD_ARP_CACHE_ENTRY:
5197  		status = irdma_sc_add_arp_cache_entry(pcmdinfo->in.u.add_arp_cache_entry.cqp,
5198  						      &pcmdinfo->in.u.add_arp_cache_entry.info,
5199  						      pcmdinfo->in.u.add_arp_cache_entry.scratch,
5200  						      pcmdinfo->post_sq);
5201  		break;
5202  	case IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY:
5203  		status = irdma_sc_alloc_local_mac_entry(pcmdinfo->in.u.alloc_local_mac_entry.cqp,
5204  							pcmdinfo->in.u.alloc_local_mac_entry.scratch,
5205  							pcmdinfo->post_sq);
5206  		break;
5207  	case IRDMA_OP_ADD_LOCAL_MAC_ENTRY:
5208  		status = irdma_sc_add_local_mac_entry(pcmdinfo->in.u.add_local_mac_entry.cqp,
5209  						      &pcmdinfo->in.u.add_local_mac_entry.info,
5210  						      pcmdinfo->in.u.add_local_mac_entry.scratch,
5211  						      pcmdinfo->post_sq);
5212  		break;
5213  	case IRDMA_OP_DELETE_LOCAL_MAC_ENTRY:
5214  		status = irdma_sc_del_local_mac_entry(pcmdinfo->in.u.del_local_mac_entry.cqp,
5215  						      pcmdinfo->in.u.del_local_mac_entry.scratch,
5216  						      pcmdinfo->in.u.del_local_mac_entry.entry_idx,
5217  						      pcmdinfo->in.u.del_local_mac_entry.ignore_ref_count,
5218  						      pcmdinfo->post_sq);
5219  		break;
5220  	case IRDMA_OP_AH_CREATE:
5221  		status = irdma_sc_create_ah(pcmdinfo->in.u.ah_create.cqp,
5222  					    &pcmdinfo->in.u.ah_create.info,
5223  					    pcmdinfo->in.u.ah_create.scratch);
5224  		break;
5225  	case IRDMA_OP_AH_DESTROY:
5226  		status = irdma_sc_destroy_ah(pcmdinfo->in.u.ah_destroy.cqp,
5227  					     &pcmdinfo->in.u.ah_destroy.info,
5228  					     pcmdinfo->in.u.ah_destroy.scratch);
5229  		break;
5230  	case IRDMA_OP_MC_CREATE:
5231  		status = irdma_sc_create_mcast_grp(pcmdinfo->in.u.mc_create.cqp,
5232  						   &pcmdinfo->in.u.mc_create.info,
5233  						   pcmdinfo->in.u.mc_create.scratch);
5234  		break;
5235  	case IRDMA_OP_MC_DESTROY:
5236  		status = irdma_sc_destroy_mcast_grp(pcmdinfo->in.u.mc_destroy.cqp,
5237  						    &pcmdinfo->in.u.mc_destroy.info,
5238  						    pcmdinfo->in.u.mc_destroy.scratch);
5239  		break;
5240  	case IRDMA_OP_MC_MODIFY:
5241  		status = irdma_sc_modify_mcast_grp(pcmdinfo->in.u.mc_modify.cqp,
5242  						   &pcmdinfo->in.u.mc_modify.info,
5243  						   pcmdinfo->in.u.mc_modify.scratch);
5244  		break;
5245  	default:
5246  		status = -EOPNOTSUPP;
5247  		break;
5248  	}
5249  
5250  	return status;
5251  }
5252  
5253  /**
5254   * irdma_process_cqp_cmd - process all cqp commands
5255   * @dev: sc device struct
5256   * @pcmdinfo: cqp command info
5257   */
irdma_process_cqp_cmd(struct irdma_sc_dev * dev,struct cqp_cmds_info * pcmdinfo)5258  int irdma_process_cqp_cmd(struct irdma_sc_dev *dev,
5259  			  struct cqp_cmds_info *pcmdinfo)
5260  {
5261  	int status = 0;
5262  	unsigned long flags;
5263  
5264  	spin_lock_irqsave(&dev->cqp_lock, flags);
5265  	if (list_empty(&dev->cqp_cmd_head) && !irdma_cqp_ring_full(dev->cqp))
5266  		status = irdma_exec_cqp_cmd(dev, pcmdinfo);
5267  	else
5268  		list_add_tail(&pcmdinfo->cqp_cmd_entry, &dev->cqp_cmd_head);
5269  	spin_unlock_irqrestore(&dev->cqp_lock, flags);
5270  	return status;
5271  }
5272  
5273  /**
5274   * irdma_process_bh - called from tasklet for cqp list
5275   * @dev: sc device struct
5276   */
irdma_process_bh(struct irdma_sc_dev * dev)5277  int irdma_process_bh(struct irdma_sc_dev *dev)
5278  {
5279  	int status = 0;
5280  	struct cqp_cmds_info *pcmdinfo;
5281  	unsigned long flags;
5282  
5283  	spin_lock_irqsave(&dev->cqp_lock, flags);
5284  	while (!list_empty(&dev->cqp_cmd_head) &&
5285  	       !irdma_cqp_ring_full(dev->cqp)) {
5286  		pcmdinfo = (struct cqp_cmds_info *)irdma_remove_cqp_head(dev);
5287  		status = irdma_exec_cqp_cmd(dev, pcmdinfo);
5288  		if (status)
5289  			break;
5290  	}
5291  	spin_unlock_irqrestore(&dev->cqp_lock, flags);
5292  	return status;
5293  }
5294  
5295  /**
5296   * irdma_cfg_aeq- Configure AEQ interrupt
5297   * @dev: pointer to the device structure
5298   * @idx: vector index
5299   * @enable: True to enable, False disables
5300   */
irdma_cfg_aeq(struct irdma_sc_dev * dev,u32 idx,bool enable)5301  void irdma_cfg_aeq(struct irdma_sc_dev *dev, u32 idx, bool enable)
5302  {
5303  	u32 reg_val;
5304  
5305  	reg_val = FIELD_PREP(IRDMA_PFINT_AEQCTL_CAUSE_ENA, enable) |
5306  		  FIELD_PREP(IRDMA_PFINT_AEQCTL_MSIX_INDX, idx) |
5307  		  FIELD_PREP(IRDMA_PFINT_AEQCTL_ITR_INDX, 3);
5308  	writel(reg_val, dev->hw_regs[IRDMA_PFINT_AEQCTL]);
5309  }
5310  
5311  /**
5312   * sc_vsi_update_stats - Update statistics
5313   * @vsi: sc_vsi instance to update
5314   */
sc_vsi_update_stats(struct irdma_sc_vsi * vsi)5315  void sc_vsi_update_stats(struct irdma_sc_vsi *vsi)
5316  {
5317  	struct irdma_gather_stats *gather_stats;
5318  	struct irdma_gather_stats *last_gather_stats;
5319  
5320  	gather_stats = vsi->pestat->gather_info.gather_stats_va;
5321  	last_gather_stats = vsi->pestat->gather_info.last_gather_stats_va;
5322  	irdma_update_stats(&vsi->pestat->hw_stats, gather_stats,
5323  			   last_gather_stats, vsi->dev->hw_stats_map,
5324  			   vsi->dev->hw_attrs.max_stat_idx);
5325  }
5326  
5327  /**
5328   * irdma_wait_pe_ready - Check if firmware is ready
5329   * @dev: provides access to registers
5330   */
irdma_wait_pe_ready(struct irdma_sc_dev * dev)5331  static int irdma_wait_pe_ready(struct irdma_sc_dev *dev)
5332  {
5333  	u32 statuscpu0;
5334  	u32 statuscpu1;
5335  	u32 statuscpu2;
5336  	u32 retrycount = 0;
5337  
5338  	do {
5339  		statuscpu0 = readl(dev->hw_regs[IRDMA_GLPE_CPUSTATUS0]);
5340  		statuscpu1 = readl(dev->hw_regs[IRDMA_GLPE_CPUSTATUS1]);
5341  		statuscpu2 = readl(dev->hw_regs[IRDMA_GLPE_CPUSTATUS2]);
5342  		if (statuscpu0 == 0x80 && statuscpu1 == 0x80 &&
5343  		    statuscpu2 == 0x80)
5344  			return 0;
5345  		mdelay(1000);
5346  	} while (retrycount++ < dev->hw_attrs.max_pe_ready_count);
5347  	return -1;
5348  }
5349  
irdma_sc_init_hw(struct irdma_sc_dev * dev)5350  static inline void irdma_sc_init_hw(struct irdma_sc_dev *dev)
5351  {
5352  	switch (dev->hw_attrs.uk_attrs.hw_rev) {
5353  	case IRDMA_GEN_1:
5354  		i40iw_init_hw(dev);
5355  		break;
5356  	case IRDMA_GEN_2:
5357  		icrdma_init_hw(dev);
5358  		break;
5359  	}
5360  }
5361  
5362  /**
5363   * irdma_sc_dev_init - Initialize control part of device
5364   * @ver: version
5365   * @dev: Device pointer
5366   * @info: Device init info
5367   */
irdma_sc_dev_init(enum irdma_vers ver,struct irdma_sc_dev * dev,struct irdma_device_init_info * info)5368  int irdma_sc_dev_init(enum irdma_vers ver, struct irdma_sc_dev *dev,
5369  		      struct irdma_device_init_info *info)
5370  {
5371  	u32 val;
5372  	int ret_code = 0;
5373  	u8 db_size;
5374  
5375  	INIT_LIST_HEAD(&dev->cqp_cmd_head); /* for CQP command backlog */
5376  	mutex_init(&dev->ws_mutex);
5377  	dev->hmc_fn_id = info->hmc_fn_id;
5378  	dev->fpm_query_buf_pa = info->fpm_query_buf_pa;
5379  	dev->fpm_query_buf = info->fpm_query_buf;
5380  	dev->fpm_commit_buf_pa = info->fpm_commit_buf_pa;
5381  	dev->fpm_commit_buf = info->fpm_commit_buf;
5382  	dev->hw = info->hw;
5383  	dev->hw->hw_addr = info->bar0;
5384  	/* Setup the hardware limits, hmc may limit further */
5385  	dev->hw_attrs.min_hw_qp_id = IRDMA_MIN_IW_QP_ID;
5386  	dev->hw_attrs.min_hw_aeq_size = IRDMA_MIN_AEQ_ENTRIES;
5387  	dev->hw_attrs.max_hw_aeq_size = IRDMA_MAX_AEQ_ENTRIES;
5388  	dev->hw_attrs.min_hw_ceq_size = IRDMA_MIN_CEQ_ENTRIES;
5389  	dev->hw_attrs.max_hw_ceq_size = IRDMA_MAX_CEQ_ENTRIES;
5390  	dev->hw_attrs.uk_attrs.min_hw_cq_size = IRDMA_MIN_CQ_SIZE;
5391  	dev->hw_attrs.uk_attrs.max_hw_cq_size = IRDMA_MAX_CQ_SIZE;
5392  	dev->hw_attrs.uk_attrs.max_hw_wq_frags = IRDMA_MAX_WQ_FRAGMENT_COUNT;
5393  	dev->hw_attrs.uk_attrs.max_hw_read_sges = IRDMA_MAX_SGE_RD;
5394  	dev->hw_attrs.max_hw_outbound_msg_size = IRDMA_MAX_OUTBOUND_MSG_SIZE;
5395  	dev->hw_attrs.max_mr_size = IRDMA_MAX_MR_SIZE;
5396  	dev->hw_attrs.max_hw_inbound_msg_size = IRDMA_MAX_INBOUND_MSG_SIZE;
5397  	dev->hw_attrs.max_hw_device_pages = IRDMA_MAX_PUSH_PAGE_COUNT;
5398  	dev->hw_attrs.uk_attrs.max_hw_inline = IRDMA_MAX_INLINE_DATA_SIZE;
5399  	dev->hw_attrs.max_hw_wqes = IRDMA_MAX_WQ_ENTRIES;
5400  	dev->hw_attrs.max_qp_wr = IRDMA_MAX_QP_WRS(IRDMA_MAX_QUANTA_PER_WR);
5401  
5402  	dev->hw_attrs.uk_attrs.max_hw_rq_quanta = IRDMA_QP_SW_MAX_RQ_QUANTA;
5403  	dev->hw_attrs.uk_attrs.max_hw_wq_quanta = IRDMA_QP_SW_MAX_WQ_QUANTA;
5404  	dev->hw_attrs.max_hw_pds = IRDMA_MAX_PDS;
5405  	dev->hw_attrs.max_hw_ena_vf_count = IRDMA_MAX_PE_ENA_VF_COUNT;
5406  
5407  	dev->hw_attrs.max_pe_ready_count = 14;
5408  	dev->hw_attrs.max_done_count = IRDMA_DONE_COUNT;
5409  	dev->hw_attrs.max_sleep_count = IRDMA_SLEEP_COUNT;
5410  	dev->hw_attrs.max_cqp_compl_wait_time_ms = CQP_COMPL_WAIT_TIME_MS;
5411  
5412  	dev->hw_attrs.uk_attrs.hw_rev = ver;
5413  	irdma_sc_init_hw(dev);
5414  
5415  	if (irdma_wait_pe_ready(dev))
5416  		return -ETIMEDOUT;
5417  
5418  	val = readl(dev->hw_regs[IRDMA_GLPCI_LBARCTRL]);
5419  	db_size = (u8)FIELD_GET(IRDMA_GLPCI_LBARCTRL_PE_DB_SIZE, val);
5420  	if (db_size != IRDMA_PE_DB_SIZE_4M && db_size != IRDMA_PE_DB_SIZE_8M) {
5421  		ibdev_dbg(to_ibdev(dev),
5422  			  "DEV: RDMA PE doorbell is not enabled in CSR val 0x%x db_size=%d\n",
5423  			  val, db_size);
5424  		return -ENODEV;
5425  	}
5426  	dev->db_addr = dev->hw->hw_addr + (uintptr_t)dev->hw_regs[IRDMA_DB_ADDR_OFFSET];
5427  
5428  	return ret_code;
5429  }
5430  
5431  /**
5432   * irdma_stat_val - Extract HW counter value from statistics buffer
5433   * @stats_val: pointer to statistics buffer
5434   * @byteoff: byte offset of counter value in the buffer (8B-aligned)
5435   * @bitoff: bit offset of counter value within 8B entry
5436   * @bitmask: maximum counter value (e.g. 0xffffff for 24-bit counter)
5437   */
irdma_stat_val(const u64 * stats_val,u16 byteoff,u8 bitoff,u64 bitmask)5438  static inline u64 irdma_stat_val(const u64 *stats_val, u16 byteoff, u8 bitoff,
5439  				 u64 bitmask)
5440  {
5441  	u16 idx = byteoff / sizeof(*stats_val);
5442  
5443  	return (stats_val[idx] >> bitoff) & bitmask;
5444  }
5445  
5446  /**
5447   * irdma_stat_delta - Calculate counter delta
5448   * @new_val: updated counter value
5449   * @old_val: last counter value
5450   * @max_val: maximum counter value (e.g. 0xffffff for 24-bit counter)
5451   */
irdma_stat_delta(u64 new_val,u64 old_val,u64 max_val)5452  static inline u64 irdma_stat_delta(u64 new_val, u64 old_val, u64 max_val)
5453  {
5454  	if (new_val >= old_val)
5455  		return new_val - old_val;
5456  
5457  	/* roll-over case */
5458  	return max_val - old_val + new_val + 1;
5459  }
5460  
5461  /**
5462   * irdma_update_stats - Update statistics
5463   * @hw_stats: hw_stats instance to update
5464   * @gather_stats: updated stat counters
5465   * @last_gather_stats: last stat counters
5466   * @map: HW stat map (hw_stats => gather_stats)
5467   * @max_stat_idx: number of HW stats
5468   */
irdma_update_stats(struct irdma_dev_hw_stats * hw_stats,struct irdma_gather_stats * gather_stats,struct irdma_gather_stats * last_gather_stats,const struct irdma_hw_stat_map * map,u16 max_stat_idx)5469  void irdma_update_stats(struct irdma_dev_hw_stats *hw_stats,
5470  			struct irdma_gather_stats *gather_stats,
5471  			struct irdma_gather_stats *last_gather_stats,
5472  			const struct irdma_hw_stat_map *map, u16 max_stat_idx)
5473  {
5474  	u64 *stats_val = hw_stats->stats_val;
5475  	u16 i;
5476  
5477  	for (i = 0; i < max_stat_idx; i++) {
5478  		u64 new_val = irdma_stat_val(gather_stats->val, map[i].byteoff,
5479  					     map[i].bitoff, map[i].bitmask);
5480  		u64 last_val = irdma_stat_val(last_gather_stats->val,
5481  					      map[i].byteoff, map[i].bitoff,
5482  					      map[i].bitmask);
5483  
5484  		stats_val[i] +=
5485  			irdma_stat_delta(new_val, last_val, map[i].bitmask);
5486  	}
5487  
5488  	memcpy(last_gather_stats, gather_stats, sizeof(*last_gather_stats));
5489  }
5490