/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#ifndef _QED_CXT_H
#define _QED_CXT_H

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/qed/qed_if.h>
#include "qed_hsi.h"
#include "qed.h"

struct qed_cxt_info {
	void			*p_cxt;
	u32			iid;
	enum protocol_type	type;
};

#define MAX_TID_BLOCKS                  512
struct qed_tid_mem {
	u32 tid_size;
	u32 num_tids_per_block;
	u32 waste;
	u8 *blocks[MAX_TID_BLOCKS];	/* 4K */
};

/**
 * qed_cxt_get_cid_info(): Returns the context info for a specific cid.
 *
 * @p_hwfn: HW device data.
 * @p_info: In/out parameter; carries the queried CID in and the context
 *          info out.
 *
 * Return: 0 on success, negative value on error.
 */
int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
			 struct qed_cxt_info *p_info);
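
/* Usage sketch (illustrative only; the cid variable and what is done with
 * the returned context are hypothetical):
 *
 *	struct qed_cxt_info cxt_info;
 *	int rc;
 *
 *	cxt_info.iid = cid;
 *	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
 *	if (!rc)
 *		memset(cxt_info.p_cxt, 0, p_hwfn->p_cxt_mngr->conn_ctx_size);
 */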

/**
 * qed_cxt_get_tid_mem_info(): Returns the TID memory info.
 *
 * @p_hwfn: HW device data.
 * @p_info: In/out parameter; filled with the TID block layout on return.
 *
 * Return: 0 on success, negative value on error.
 */
int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
			     struct qed_tid_mem *p_info);
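
/* Once the map above is filled in, a consumer can locate the memory of a
 * given TID from the per-block layout. A minimal sketch (the helper name is
 * hypothetical):
 *
 *	static void *get_task_mem(struct qed_tid_mem *info, u32 tid)
 *	{
 *		return info->blocks[tid / info->num_tids_per_block] +
 *		       (tid % info->num_tids_per_block) * info->tid_size;
 *	}
 */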

#define QED_CXT_TCP_ULP_TID_SEG	PROTOCOLID_TCP_ULP
#define QED_CXT_ROCE_TID_SEG	PROTOCOLID_ROCE
#define QED_CXT_FCOE_TID_SEG	PROTOCOLID_FCOE
enum qed_cxt_elem_type {
	QED_ELEM_CXT,
	QED_ELEM_SRQ,
	QED_ELEM_TASK,
	QED_ELEM_XRC_SRQ,
};

u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
				enum protocol_type type, u32 *vf_cid);

/**
 * qed_cxt_set_pf_params(): Set the PF params for cxt init.
 *
 * @p_hwfn: HW device data.
 * @rdma_tasks: Requested maximum number of RDMA tasks.
 *
 * Return: 0 on success, negative value on error.
 */
int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks);

/**
 * qed_cxt_cfg_ilt_compute(): Compute ILT init parameters.
 *
 * @p_hwfn: HW device data.
 * @last_line: Filled with the last ILT line used by the computed
 *             configuration.
 *
 * Return: 0 on success, negative value on error.
 */
int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *last_line);

/**
 * qed_cxt_cfg_ilt_compute_excess(): How many ILT lines the current
 *                                   configuration can be decreased by.
 *
 * @p_hwfn: HW device data.
 * @used_lines: Number of ILT lines currently used.
 *
 * Return: The number of ILT lines that can be freed.
 */
u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines);

/**
 * qed_cxt_mngr_alloc(): Allocate and init the context manager struct.
 *
 * @p_hwfn: HW device data.
 *
 * Return: 0 on success, negative value on error.
 */
int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn);

/**
 * qed_cxt_mngr_free(): Free the context manager struct.
 *
 * @p_hwfn: HW device data.
 *
 * Return: Void.
 */
void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn);

/**
 * qed_cxt_tables_alloc(): Allocate ILT shadow, Searcher T2, acquired map.
 *
 * @p_hwfn: HW device data.
 *
 * Return: 0 on success, negative value on error.
 */
int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn);

/**
 * qed_cxt_mngr_setup(): Reset the acquired CIDs.
 *
 * @p_hwfn: HW device data.
 */
void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn);

/**
 * qed_cxt_hw_init_common(): Initialize ILT and DQ, common phase, per path.
 *
 * @p_hwfn: HW device data.
 *
 * Return: Void.
 */
void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn);

/**
 * qed_cxt_hw_init_pf(): Initialize ILT and DQ, PF phase, per path.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: PTT window used for register access.
 *
 * Return: Void.
 */
void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
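
/* The context-manager lifecycle, roughly as the driver uses it (a sketch of
 * the intended ordering, not a strict contract):
 *
 *	qed_cxt_mngr_alloc()      - allocate the manager itself
 *	qed_cxt_set_pf_params()   - declare per-protocol requirements
 *	qed_cxt_cfg_ilt_compute() - size the ILT
 *	qed_cxt_tables_alloc()    - allocate ILT shadow / T2 / acquired maps
 *	qed_cxt_mngr_setup()      - reset acquired CIDs
 *	qed_cxt_hw_init_common()  - program common (per-path) HW state
 *	qed_cxt_hw_init_pf()      - program PF HW state
 *	qed_cxt_mngr_free()       - tear everything down
 */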

/**
 * qed_qm_init_pf(): Initialize the QM PF phase, per path.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: PTT window used for register access.
 * @is_pf_loading: True if the PF is currently loading.
 *
 * Return: Void.
 */
void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt, bool is_pf_loading);

/**
 * qed_qm_reconf(): Reconfigure the QM PF on the fly.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: PTT window used for register access.
 *
 * Return: 0 on success, negative value on error.
 */
int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);

#define QED_CXT_PF_CID (0xff)

/**
 * qed_cxt_release_cid(): Release a CID.
 *
 * @p_hwfn: HW device data.
 * @cid: The CID to release.
 *
 * Return: Void.
 */
void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid);

/**
 * _qed_cxt_release_cid(): Release a CID belonging to a VF-queue.
 *
 * @p_hwfn: HW device data.
 * @cid: The CID to release.
 * @vfid: Engine-relative VF index, or QED_CXT_PF_CID if the CID belongs
 *        to the PF.
 *
 * Return: Void.
 */
void _qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid, u8 vfid);

/**
 * qed_cxt_acquire_cid(): Acquire a new CID of a specific protocol type.
 *
 * @p_hwfn: HW device data.
 * @type: Protocol type.
 * @p_cid: Filled with the acquired CID on return.
 *
 * Return: 0 on success, negative value on error.
 */
int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
			enum protocol_type type, u32 *p_cid);

/**
 * _qed_cxt_acquire_cid(): Acquire a new CID of a specific protocol type
 *                         for a VF-queue.
 *
 * @p_hwfn: HW device data.
 * @type: Protocol type.
 * @p_cid: Filled with the acquired CID on return.
 * @vfid: Engine-relative VF index, or QED_CXT_PF_CID if the CID belongs
 *        to the PF.
 *
 * Return: 0 on success, negative value on error.
 */
int _qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
			 enum protocol_type type, u32 *p_cid, u8 vfid);
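
/* Typical acquire/release pairing (illustrative sketch; the protocol choice
 * and surrounding logic are hypothetical):
 *
 *	u32 cid;
 *	int rc;
 *
 *	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid);
 *	if (rc)
 *		return rc;
 *	...
 *	qed_cxt_release_cid(p_hwfn, cid);
 *
 * The _qed_cxt_acquire_cid()/_qed_cxt_release_cid() variants additionally
 * take the VF's engine-relative index, or QED_CXT_PF_CID when the CID
 * belongs to the PF itself.
 */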

int qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
			      enum qed_cxt_elem_type elem_type, u32 iid);
u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
				enum protocol_type type);
u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn,
				enum protocol_type type);
int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto);

#define QED_CTX_WORKING_MEM 0
#define QED_CTX_FL_MEM 1
int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn,
			 u32 tid, u8 ctx_type, void **task_ctx);
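
/* Usage sketch for qed_cxt_get_task_ctx() (illustrative only; the tid value
 * and what is done with the returned context are hypothetical):
 *
 *	void *task_ctx;
 *	int rc;
 *
 *	rc = qed_cxt_get_task_ctx(p_hwfn, tid, QED_CTX_WORKING_MEM, &task_ctx);
 *	if (!rc)
 *		memset(task_ctx, 0, p_hwfn->p_cxt_mngr->task_ctx_size);
 *
 * QED_CTX_FL_MEM selects the force-load copy of the task context instead of
 * the working memory.
 */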

/* Max number of connection types in HW (DQ/CDU etc.) */
#define MAX_CONN_TYPES          PROTOCOLID_COMMON
#define NUM_TASK_TYPES          2
#define NUM_TASK_PF_SEGMENTS    4
#define NUM_TASK_VF_SEGMENTS    1

/* PF per-protocol configuration object */
#define TASK_SEGMENTS   (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS)
#define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS)

struct qed_tid_seg {
	u32 count;
	u8 type;
	bool has_fl_mem;
};

struct qed_conn_type_cfg {
	u32 cid_count;
	u32 cids_per_vf;
	struct qed_tid_seg tid_seg[TASK_SEGMENTS];
};

/* ILT client configuration:
 * per connection type (protocol) resources (CIDs, TIDs, VF CIDs etc.).
 * One block is needed for the connection context (CDUC), and each task
 * segment needs two blocks - one for the regular task context and one for
 * the force-load memory.
 */
#define ILT_CLI_PF_BLOCKS       (1 + NUM_TASK_PF_SEGMENTS * 2)
#define ILT_CLI_VF_BLOCKS       (1 + NUM_TASK_VF_SEGMENTS * 2)
#define CDUC_BLK                (0)
#define SRQ_BLK                 (0)
#define CDUT_SEG_BLK(n)         (1 + (u8)(n))
#define CDUT_FL_SEG_BLK(n, X)   (1 + (n) + NUM_TASK_ ## X ## _SEGMENTS)
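
/* With NUM_TASK_PF_SEGMENTS == 4, the PF block indices therefore work out as
 * follows (a worked example derived from the macros above):
 *
 *	CDUC_BLK                  == 0
 *	CDUT_SEG_BLK(0..3)        == 1..4   (working task memory)
 *	CDUT_FL_SEG_BLK(0..3, PF) == 5..8   (force-load memory)
 *
 * e.g. CDUT_SEG_BLK(2) == 3 and CDUT_FL_SEG_BLK(2, PF) == 7, for a total of
 * ILT_CLI_PF_BLOCKS == 9 blocks.
 */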

struct ilt_cfg_pair {
	u32 reg;
	u32 val;
};

struct qed_ilt_cli_blk {
	u32 total_size;		/* 0 means not active */
	u32 real_size_in_page;
	u32 start_line;
	u32 dynamic_line_offset;
	u32 dynamic_line_cnt;
};

struct qed_ilt_client_cfg {
	bool active;

	/* ILT boundaries */
	struct ilt_cfg_pair first;
	struct ilt_cfg_pair last;
	struct ilt_cfg_pair p_size;

	/* ILT client blocks for PF */
	struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
	u32 pf_total_lines;

	/* ILT client blocks for VFs */
	struct qed_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
	u32 vf_total_lines;
};

struct qed_cid_acquired_map {
	u32		start_cid;
	u32		max_count;
	unsigned long	*cid_map;
};
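
/* A CID is tracked as one bit in @cid_map: CID @cid is expected to map to
 * bit (cid - start_cid), with the offset bounded by @max_count. (Sketch of
 * the intended mapping; the actual bit handling lives in qed_cxt.c.)
 */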

struct qed_src_t2 {
	struct phys_mem_desc *dma_mem;
	u32 num_pages;
	u64 first_free;
	u64 last_free;
};

struct qed_cxt_mngr {
	/* Per protocol configuration */
	struct qed_conn_type_cfg	conn_cfg[MAX_CONN_TYPES];

	/* computed ILT structure */
	struct qed_ilt_client_cfg	clients[MAX_ILT_CLIENTS];

	/* Task type sizes */
	u32 task_type_size[NUM_TASK_TYPES];

	/* total number of VFs for this hwfn -
	 * ALL VFs are symmetric in terms of HW resources
	 */
	u32 vf_count;
	u32 first_vf_in_pf;

	/* Acquired CIDs */
	struct qed_cid_acquired_map	acquired[MAX_CONN_TYPES];

	struct qed_cid_acquired_map
	acquired_vf[MAX_CONN_TYPES][MAX_NUM_VFS];

	/* ILT shadow table */
	struct phys_mem_desc *ilt_shadow;
	u32 ilt_shadow_size;
	u32 pf_start_line;

	/* Mutex for a dynamic ILT allocation */
	struct mutex mutex;

	/* SRC T2 */
	struct qed_src_t2 src_t2;

	/* total number of SRQ's for this hwfn */
	u32 srq_count;
	u32 xrc_srq_count;

	/* Maximal number of L2 steering filters */
	u32 arfs_count;

	u16 iscsi_task_pages;
	u16 fcoe_task_pages;
	u16 roce_task_pages;
	u16 eth_task_pages;
	u16 task_ctx_size;
	u16 conn_ctx_size;
};

u16 qed_get_cdut_num_pf_init_pages(struct qed_hwfn *p_hwfn);
u16 qed_get_cdut_num_vf_init_pages(struct qed_hwfn *p_hwfn);
u16 qed_get_cdut_num_pf_work_pages(struct qed_hwfn *p_hwfn);
u16 qed_get_cdut_num_vf_work_pages(struct qed_hwfn *p_hwfn);

u32 qed_cxt_get_ilt_page_size(struct qed_hwfn *p_hwfn,
			      enum ilt_clients ilt_client);

u32 qed_cxt_get_total_srq_count(struct qed_hwfn *p_hwfn);

#endif