/*
 * Copyright (c) 2006, 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX4_IB_H
#define MLX4_IB_H

#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/idr.h>
#include <linux/notifier.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_sa.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>
#include <linux/mlx4/qp.h>
#include <linux/mlx4/cq.h>

#define MLX4_IB_DRV_NAME	"mlx4_ib"

#ifdef pr_fmt
#undef pr_fmt
#endif
#define pr_fmt(fmt)	"<" MLX4_IB_DRV_NAME "> %s: " fmt, __func__
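
/*
 * Illustrative only (the caller and its arguments below are made up):
 * with the pr_fmt above, a call in some function such as
 *
 *	pr_warn("couldn't map page %d\n", i);
 *
 * prints "<mlx4_ib> my_func: couldn't map page 3", the function name
 * being supplied by __func__.
 */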

#define mlx4_ib_warn(ibdev, format, arg...) \
	dev_warn((ibdev)->dev.parent, MLX4_IB_DRV_NAME ": " format, ## arg)

enum {
	MLX4_IB_SQ_MIN_WQE_SHIFT = 6,
	MLX4_IB_MAX_HEADROOM	 = 2048
};

#define MLX4_IB_SQ_HEADROOM(shift)	((MLX4_IB_MAX_HEADROOM >> (shift)) + 1)
#define MLX4_IB_SQ_MAX_SPARE		(MLX4_IB_SQ_HEADROOM(MLX4_IB_SQ_MIN_WQE_SHIFT))
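
/*
 * Worked example (arithmetic only): at the minimum WQE shift of 6
 * (64-byte WQEs), MLX4_IB_SQ_HEADROOM(6) = (2048 >> 6) + 1 = 33, so
 * MLX4_IB_SQ_MAX_SPARE reserves 33 spare send WQEs.
 */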

/* module parameter indicating whether the SM assigns the alias GUID */
extern int mlx4_ib_sm_guid_assign;

#define MLX4_IB_UC_STEER_QPN_ALIGN 1
#define MLX4_IB_UC_MAX_NUM_QPS     256

enum hw_bar_type {
	HW_BAR_BF,
	HW_BAR_DB,
	HW_BAR_CLOCK,
	HW_BAR_COUNT
};

struct mlx4_ib_ucontext {
	struct ib_ucontext	ibucontext;
	struct mlx4_uar		uar;
	struct list_head	db_page_list;
	struct mutex		db_page_mutex;
	struct list_head	wqn_ranges_list;
	struct mutex		wqn_ranges_mutex; /* protect wqn_ranges_list */
};

struct mlx4_ib_pd {
	struct ib_pd		ibpd;
	u32			pdn;
};

struct mlx4_ib_xrcd {
	struct ib_xrcd		ibxrcd;
	u32			xrcdn;
	struct ib_pd	       *pd;
	struct ib_cq	       *cq;
};

struct mlx4_ib_cq_buf {
	struct mlx4_buf		buf;
	struct mlx4_mtt		mtt;
	int			entry_size;
};

struct mlx4_ib_cq_resize {
	struct mlx4_ib_cq_buf	buf;
	int			cqe;
};

struct mlx4_ib_cq {
	struct ib_cq		ibcq;
	struct mlx4_cq		mcq;
	struct mlx4_ib_cq_buf	buf;
	struct mlx4_ib_cq_resize *resize_buf;
	struct mlx4_db		db;
	spinlock_t		lock;
	struct mutex		resize_mutex;
	struct ib_umem	       *umem;
	struct ib_umem	       *resize_umem;
	int			create_flags;
	/* List of QPs that this CQ serves. */
	struct list_head		send_qp_list;
	struct list_head		recv_qp_list;
};

#define MLX4_MR_PAGES_ALIGN 0x40

struct mlx4_ib_mr {
	struct ib_mr		ibmr;
	__be64			*pages;
	dma_addr_t		page_map;
	u32			npages;
	u32			max_pages;
	struct mlx4_mr		mmr;
	struct ib_umem	       *umem;
	size_t			page_map_size;
};

struct mlx4_ib_mw {
	struct ib_mw		ibmw;
	struct mlx4_mw		mmw;
};

#define MAX_REGS_PER_FLOW 2

struct mlx4_flow_reg_id {
	u64 id;
	u64 mirror;
};

struct mlx4_ib_flow {
	struct ib_flow ibflow;
	/* translating DMFS verbs sniffer rule to FW API requires two reg IDs */
	struct mlx4_flow_reg_id reg_id[MAX_REGS_PER_FLOW];
};

struct mlx4_ib_wq {
	u64		       *wrid;
	spinlock_t		lock;
	int			wqe_cnt;
	int			max_post;
	int			max_gs;
	int			offset;
	int			wqe_shift;
	unsigned		head;
	unsigned		tail;
};

enum {
	MLX4_IB_QP_CREATE_ROCE_V2_GSI = IB_QP_CREATE_RESERVED_START
};

enum mlx4_ib_qp_flags {
	MLX4_IB_QP_LSO = IB_QP_CREATE_IPOIB_UD_LSO,
	MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
	MLX4_IB_QP_NETIF = IB_QP_CREATE_NETIF_QP,
	MLX4_IB_QP_SCATTER_FCS = IB_QP_CREATE_SCATTER_FCS,

	/* Mellanox specific flags start from IB_QP_CREATE_RESERVED_START */
	MLX4_IB_ROCE_V2_GSI_QP = MLX4_IB_QP_CREATE_ROCE_V2_GSI,
	MLX4_IB_SRIOV_TUNNEL_QP = 1 << 30,
	MLX4_IB_SRIOV_SQP = 1 << 31,
};

struct mlx4_ib_gid_entry {
	struct list_head	list;
	union ib_gid		gid;
	int			added;
	u8			port;
};

enum mlx4_ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
	MLX4_IB_QPT_SMI = IB_QPT_SMI,
	MLX4_IB_QPT_GSI = IB_QPT_GSI,

	MLX4_IB_QPT_RC = IB_QPT_RC,
	MLX4_IB_QPT_UC = IB_QPT_UC,
	MLX4_IB_QPT_UD = IB_QPT_UD,
	MLX4_IB_QPT_RAW_IPV6 = IB_QPT_RAW_IPV6,
	MLX4_IB_QPT_RAW_ETHERTYPE = IB_QPT_RAW_ETHERTYPE,
	MLX4_IB_QPT_RAW_PACKET = IB_QPT_RAW_PACKET,
	MLX4_IB_QPT_XRC_INI = IB_QPT_XRC_INI,
	MLX4_IB_QPT_XRC_TGT = IB_QPT_XRC_TGT,

	MLX4_IB_QPT_PROXY_SMI_OWNER	= 1 << 16,
	MLX4_IB_QPT_PROXY_SMI		= 1 << 17,
	MLX4_IB_QPT_PROXY_GSI		= 1 << 18,
	MLX4_IB_QPT_TUN_SMI_OWNER	= 1 << 19,
	MLX4_IB_QPT_TUN_SMI		= 1 << 20,
	MLX4_IB_QPT_TUN_GSI		= 1 << 21,
};

#define MLX4_IB_QPT_ANY_SRIOV	(MLX4_IB_QPT_PROXY_SMI_OWNER | \
	MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER | \
	MLX4_IB_QPT_TUN_SMI | MLX4_IB_QPT_TUN_GSI)
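
/*
 * Sketch of how the mask above can be used (illustrative, not a quote
 * from the driver): because the proxy/tunnel types occupy bits 16-21,
 * disjoint from the small IB_QPT_* values reused above, a bitwise test
 * is unambiguous:
 *
 *	if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV)
 *		; // QP needs SR-IOV special handling
 */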

enum mlx4_ib_mad_ifc_flags {
	MLX4_MAD_IFC_IGNORE_MKEY	= 1,
	MLX4_MAD_IFC_IGNORE_BKEY	= 2,
	MLX4_MAD_IFC_IGNORE_KEYS	= (MLX4_MAD_IFC_IGNORE_MKEY |
					   MLX4_MAD_IFC_IGNORE_BKEY),
	MLX4_MAD_IFC_NET_VIEW		= 4,
};

enum {
	MLX4_NUM_TUNNEL_BUFS		= 512,
	MLX4_NUM_WIRE_BUFS		= 2048,
};

struct mlx4_ib_tunnel_header {
	struct mlx4_av av;
	__be32 remote_qpn;
	__be32 qkey;
	__be16 vlan;
	u8 mac[6];
	__be16 pkey_index;
	u8 reserved[6];
};

struct mlx4_ib_buf {
	void *addr;
	dma_addr_t map;
};

struct mlx4_rcv_tunnel_hdr {
	__be32 flags_src_qp; /* flags[6:5] is defined for VLANs:
			      * 0x0 - no vlan was in the packet
			      * 0x01 - C-VLAN was in the packet */
	u8 g_ml_path; /* gid bit stands for ipv6/4 header in RoCE */
	u8 reserved;
	__be16 pkey_index;
	__be16 sl_vid;
	__be16 slid_mac_47_32;
	__be32 mac_31_0;
};

struct mlx4_ib_proxy_sqp_hdr {
	struct ib_grh grh;
	struct mlx4_rcv_tunnel_hdr tun;
}  __packed;

struct mlx4_roce_smac_vlan_info {
	u64 smac;
	int smac_index;
	int smac_port;
	u64 candidate_smac;
	int candidate_smac_index;
	int candidate_smac_port;
	u16 vid;
	int vlan_index;
	int vlan_port;
	u16 candidate_vid;
	int candidate_vlan_index;
	int candidate_vlan_port;
	int update_vid;
};

struct mlx4_wqn_range {
	int			base_wqn;
	int			size;
	int			refcount;
	bool			dirty;
	struct list_head	list;
};

struct mlx4_ib_rss {
	unsigned int		base_qpn_tbl_sz;
	u8			flags;
	u8			rss_key[MLX4_EN_RSS_KEY_SIZE];
};

enum {
	/*
	 * Largest possible UD header: send with GRH and immediate
	 * data plus 18 bytes for an Ethernet header with VLAN/802.1Q
	 * tag.  (LRH would only use 8 bytes, so Ethernet is the
	 * biggest case)
	 */
	MLX4_IB_UD_HEADER_SIZE		= 82,
	MLX4_IB_LSO_HEADER_SPARE	= 128,
};
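
/*
 * Worked example for the 82-byte figure above, assuming the standard
 * IB header sizes: Ethernet + VLAN tag (18) + GRH (40) + BTH (12) +
 * DETH (8) + immediate data (4) = 82 bytes.
 */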

struct mlx4_ib_sqp {
	int pkey_index;
	u32 qkey;
	u32 send_psn;
	struct ib_ud_header ud_header;
	u8 header_buf[MLX4_IB_UD_HEADER_SIZE];
	struct ib_qp *roce_v2_gsi;
};

struct mlx4_ib_qp {
	union {
		struct ib_qp	ibqp;
		struct ib_wq	ibwq;
	};
	struct mlx4_qp		mqp;
	struct mlx4_buf		buf;

	struct mlx4_db		db;
	struct mlx4_ib_wq	rq;

	u32			doorbell_qpn;
	__be32			sq_signal_bits;
	unsigned		sq_next_wqe;
	int			sq_spare_wqes;
	struct mlx4_ib_wq	sq;

	enum mlx4_ib_qp_type	mlx4_ib_qp_type;
	struct ib_umem	       *umem;
	struct mlx4_mtt		mtt;
	int			buf_size;
	struct mutex		mutex;
	u16			xrcdn;
	u32			flags;
	u8			port;
	u8			alt_port;
	u8			atomic_rd_en;
	u8			resp_depth;
	u8			sq_no_prefetch;
	u8			state;
	int			mlx_type;
	u32			inl_recv_sz;
	struct list_head	gid_list;
	struct list_head	steering_rules;
	struct mlx4_ib_buf	*sqp_proxy_rcv;
	struct mlx4_roce_smac_vlan_info pri;
	struct mlx4_roce_smac_vlan_info alt;
	u64			reg_id;
	struct list_head	qps_list;
	struct list_head	cq_recv_list;
	struct list_head	cq_send_list;
	struct counter_index	*counter_index;
	struct mlx4_wqn_range	*wqn_range;
	/* Number of RSS QP parents that use this WQ */
	u32			rss_usecnt;
	union {
		struct mlx4_ib_rss *rss_ctx;
		struct mlx4_ib_sqp *sqp;
	};
};

struct mlx4_ib_srq {
	struct ib_srq		ibsrq;
	struct mlx4_srq		msrq;
	struct mlx4_buf		buf;
	struct mlx4_db		db;
	u64		       *wrid;
	spinlock_t		lock;
	int			head;
	int			tail;
	u16			wqe_ctr;
	struct ib_umem	       *umem;
	struct mlx4_mtt		mtt;
	struct mutex		mutex;
};

struct mlx4_ib_ah {
	struct ib_ah		ibah;
	union mlx4_ext_av       av;
};

struct mlx4_ib_rwq_ind_table {
	struct ib_rwq_ind_table ib_rwq_ind_tbl;
};

/****************************************/
/* alias guid support */
/****************************************/
#define NUM_PORT_ALIAS_GUID		2
#define NUM_ALIAS_GUID_IN_REC		8
#define NUM_ALIAS_GUID_REC_IN_PORT	16
#define GUID_REC_SIZE			8
#define NUM_ALIAS_GUID_PER_PORT		128
#define MLX4_NOT_SET_GUID		(0x00LL)
#define MLX4_GUID_FOR_DELETE_VAL	(~(0x00LL))
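
/*
 * Arithmetic check on the constants above: each port exposes
 * NUM_ALIAS_GUID_REC_IN_PORT (16) GUIDInfo records holding
 * NUM_ALIAS_GUID_IN_REC (8) GUIDs of GUID_REC_SIZE (8) bytes each,
 * giving NUM_ALIAS_GUID_PER_PORT = 16 * 8 = 128 alias GUIDs per port.
 */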

enum mlx4_guid_alias_rec_status {
	MLX4_GUID_INFO_STATUS_IDLE,
	MLX4_GUID_INFO_STATUS_SET,
};

#define GUID_STATE_NEED_PORT_INIT 0x01

enum mlx4_guid_alias_rec_method {
	MLX4_GUID_INFO_RECORD_SET	= IB_MGMT_METHOD_SET,
	MLX4_GUID_INFO_RECORD_DELETE	= IB_SA_METHOD_DELETE,
};

struct mlx4_sriov_alias_guid_info_rec_det {
	u8 all_recs[GUID_REC_SIZE * NUM_ALIAS_GUID_IN_REC];
	ib_sa_comp_mask guid_indexes; /* indicates which of the 8 records are valid */
	enum mlx4_guid_alias_rec_status status; /* indicates the administrative status of the record */
	unsigned int guids_retry_schedule[NUM_ALIAS_GUID_IN_REC];
	u64 time_to_run;
};

struct mlx4_sriov_alias_guid_port_rec_det {
	struct mlx4_sriov_alias_guid_info_rec_det all_rec_per_port[NUM_ALIAS_GUID_REC_IN_PORT];
	struct workqueue_struct *wq;
	struct delayed_work alias_guid_work;
	u32 port;
	u32 state_flags;
	struct mlx4_sriov_alias_guid *parent;
	struct list_head cb_list;
};

struct mlx4_sriov_alias_guid {
	struct mlx4_sriov_alias_guid_port_rec_det ports_guid[MLX4_MAX_PORTS];
	spinlock_t ag_work_lock;
	struct ib_sa_client *sa_client;
};

struct mlx4_ib_demux_work {
	struct work_struct	work;
	struct mlx4_ib_dev     *dev;
	int			slave;
	int			do_init;
	u8			port;
};

struct mlx4_ib_tun_tx_buf {
	struct mlx4_ib_buf buf;
	struct ib_ah *ah;
};

struct mlx4_ib_demux_pv_qp {
	struct ib_qp *qp;
	enum ib_qp_type proxy_qpt;
	struct mlx4_ib_buf *ring;
	struct mlx4_ib_tun_tx_buf *tx_ring;
	spinlock_t tx_lock;
	unsigned tx_ix_head;
	unsigned tx_ix_tail;
};

enum mlx4_ib_demux_pv_state {
	DEMUX_PV_STATE_DOWN,
	DEMUX_PV_STATE_STARTING,
	DEMUX_PV_STATE_ACTIVE,
	DEMUX_PV_STATE_DOWNING,
};

struct mlx4_ib_demux_pv_ctx {
	int port;
	int slave;
	enum mlx4_ib_demux_pv_state state;
	int has_smi;
	struct ib_device *ib_dev;
	struct ib_cq *cq;
	struct ib_pd *pd;
	struct work_struct work;
	struct workqueue_struct *wq;
	struct workqueue_struct *wi_wq;
	struct mlx4_ib_demux_pv_qp qp[2];
};

struct mlx4_ib_demux_ctx {
	struct ib_device *ib_dev;
	int port;
	struct workqueue_struct *wq;
	struct workqueue_struct *wi_wq;
	struct workqueue_struct *ud_wq;
	spinlock_t ud_lock;
	atomic64_t subnet_prefix;
	__be64 guid_cache[128];
	struct mlx4_ib_dev *dev;
	/* the following lock protects both mcg_table and mcg_mgid0_list */
	struct mutex		mcg_table_lock;
	struct rb_root		mcg_table;
	struct list_head	mcg_mgid0_list;
	struct workqueue_struct	*mcg_wq;
	struct mlx4_ib_demux_pv_ctx **tun;
	atomic_t tid;
	int    flushing; /* flushing the work queue */
};

struct mlx4_ib_sriov {
	struct mlx4_ib_demux_ctx demux[MLX4_MAX_PORTS];
	struct mlx4_ib_demux_pv_ctx *sqps[MLX4_MAX_PORTS];
	/* use the _irq variants when taking this spinlock, since it
	 * may be acquired from interrupt context. */
	spinlock_t going_down_lock;
	int is_going_down;

	struct mlx4_sriov_alias_guid alias_guid;

	/* CM paravirtualization fields */
	struct xarray pv_id_table;
	u32 pv_id_next;
	spinlock_t id_map_lock;
	struct rb_root sl_id_map;
	struct list_head cm_list;
	struct xarray xa_rej_tmout;
};

struct gid_cache_context {
	int real_index;
	int refcount;
};

struct gid_entry {
	union ib_gid	gid;
	enum ib_gid_type gid_type;
	struct gid_cache_context *ctx;
	u16 vlan_id;
};

struct mlx4_port_gid_table {
	struct gid_entry gids[MLX4_MAX_PORT_GIDS];
};

struct mlx4_ib_iboe {
	spinlock_t		lock;
	struct net_device      *netdevs[MLX4_MAX_PORTS];
	atomic64_t		mac[MLX4_MAX_PORTS];
	struct notifier_block	nb;
	struct mlx4_port_gid_table gids[MLX4_MAX_PORTS];
	enum ib_port_state	last_port_state[MLX4_MAX_PORTS];
};

struct pkey_mgt {
	u8			virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
	u16			phys_pkey_cache[MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
	struct list_head	pkey_port_list[MLX4_MFUNC_MAX];
	struct kobject	       *device_parent[MLX4_MFUNC_MAX];
};

struct mlx4_ib_iov_sysfs_attr {
	void *ctx;
	struct kobject *kobj;
	unsigned long data;
	u32 entry_num;
	char name[15];
	struct device_attribute dentry;
	struct device *dev;
};

struct mlx4_ib_iov_sysfs_attr_ar {
	struct mlx4_ib_iov_sysfs_attr dentries[3 * NUM_ALIAS_GUID_PER_PORT + 1];
};

struct mlx4_ib_iov_port {
	char name[100];
	u8 num;
	struct mlx4_ib_dev *dev;
	struct list_head list;
	struct mlx4_ib_iov_sysfs_attr_ar *dentr_ar;
	struct ib_port_attr attr;
	struct kobject	*cur_port;
	struct kobject	*admin_alias_parent;
	struct kobject	*gids_parent;
	struct kobject	*pkeys_parent;
	struct kobject	*mcgs_parent;
	struct mlx4_ib_iov_sysfs_attr mcg_dentry;
};

struct counter_index {
	struct list_head	list;
	u32		index;
	u8		allocated;
};

struct mlx4_ib_counters {
	struct list_head        counters_list;
	struct mutex            mutex; /* mutex for accessing counters list */
	u32			default_counter;
};

#define MLX4_DIAG_COUNTERS_TYPES 2

struct mlx4_ib_diag_counters {
	struct rdma_stat_desc *descs;
	u32 *offset;
	u32 num_counters;
};

struct mlx4_ib_dev {
	struct ib_device	ib_dev;
	struct mlx4_dev	       *dev;
	int			num_ports;
	void __iomem	       *uar_map;

	struct mlx4_uar		priv_uar;
	u32			priv_pdn;
	MLX4_DECLARE_DOORBELL_LOCK(uar_lock);

	struct ib_mad_agent    *send_agent[MLX4_MAX_PORTS][2];
	struct ib_ah	       *sm_ah[MLX4_MAX_PORTS];
	spinlock_t		sm_lock;
	atomic64_t		sl2vl[MLX4_MAX_PORTS];
	struct mlx4_ib_sriov	sriov;

	struct mutex		cap_mask_mutex;
	bool			ib_active;
	struct mlx4_ib_iboe	iboe;
	struct mlx4_ib_counters counters_table[MLX4_MAX_PORTS];
	int		       *eq_table;
	struct kobject	       *iov_parent;
	struct kobject	       *ports_parent;
	struct kobject	       *dev_ports_parent[MLX4_MFUNC_MAX];
	struct mlx4_ib_iov_port	iov_ports[MLX4_MAX_PORTS];
	struct pkey_mgt		pkeys;
	unsigned long *ib_uc_qpns_bitmap;
	int steer_qpn_count;
	int steer_qpn_base;
	int steering_support;
	struct mlx4_ib_qp      *qp1_proxy[MLX4_MAX_PORTS];
	/* lock when destroying qp1_proxy and getting netdev events */
	struct mutex		qp1_proxy_lock[MLX4_MAX_PORTS];
	u8			bond_next_port;
	/* protect resources needed as part of reset flow */
	spinlock_t		reset_flow_resource_lock;
	struct list_head		qp_list;
	struct mlx4_ib_diag_counters diag_counters[MLX4_DIAG_COUNTERS_TYPES];
	struct notifier_block	mlx_nb;
};

struct ib_event_work {
	struct work_struct	work;
	struct mlx4_ib_dev	*ib_dev;
	struct mlx4_eqe		ib_eqe;
	int			port;
};

struct mlx4_ib_qp_tunnel_init_attr {
	struct ib_qp_init_attr init_attr;
	int slave;
	enum ib_qp_type proxy_qp_type;
	u32 port;
};

struct mlx4_uverbs_ex_query_device {
	__u32 comp_mask;
	__u32 reserved;
};

static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx4_ib_dev, ib_dev);
}
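
/*
 * Usage sketch (the caller below is hypothetical; the idiom is the
 * standard container_of pattern): verbs entry points receive the
 * embedded struct ib_device and recover the driver-private device:
 *
 *	static int example_query(struct ib_device *ibdev)
 *	{
 *		struct mlx4_ib_dev *dev = to_mdev(ibdev);
 *
 *		return dev->num_ports;
 *	}
 *
 * The accessors below follow the same pattern for the other objects.
 */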

static inline struct mlx4_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx4_ib_ucontext, ibucontext);
}

static inline struct mlx4_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx4_ib_pd, ibpd);
}

static inline struct mlx4_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx4_ib_xrcd, ibxrcd);
}

static inline struct mlx4_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx4_ib_cq, ibcq);
}

static inline struct mlx4_ib_cq *to_mibcq(struct mlx4_cq *mcq)
{
	return container_of(mcq, struct mlx4_ib_cq, mcq);
}

static inline struct mlx4_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx4_ib_mr, ibmr);
}

static inline struct mlx4_ib_mw *to_mmw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct mlx4_ib_mw, ibmw);
}

static inline struct mlx4_ib_flow *to_mflow(struct ib_flow *ibflow)
{
	return container_of(ibflow, struct mlx4_ib_flow, ibflow);
}

static inline struct mlx4_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx4_ib_qp, ibqp);
}

static inline struct mlx4_ib_qp *to_mibqp(struct mlx4_qp *mqp)
{
	return container_of(mqp, struct mlx4_ib_qp, mqp);
}

static inline struct mlx4_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx4_ib_srq, ibsrq);
}

static inline struct mlx4_ib_srq *to_mibsrq(struct mlx4_srq *msrq)
{
	return container_of(msrq, struct mlx4_ib_srq, msrq);
}

static inline struct mlx4_ib_ah *to_mah(struct ib_ah *ibah)
{
	return container_of(ibah, struct mlx4_ib_ah, ibah);
}

static inline u8 mlx4_ib_bond_next_port(struct mlx4_ib_dev *dev)
{
	dev->bond_next_port = (dev->bond_next_port + 1) % dev->num_ports;

	return dev->bond_next_port + 1;
}
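
/*
 * Worked example (arithmetic only): with num_ports == 2 and
 * bond_next_port starting at 0, successive calls return 2, 1, 2, 1,
 * and so on: a 1-based round-robin over the ports.
 */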

int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev);
void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev);

int mlx4_ib_db_map_user(struct ib_udata *udata, unsigned long virt,
			struct mlx4_db *db);
void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db);

struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc);
int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
			   struct ib_umem *umem);
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata);
int mlx4_ib_dereg_mr(struct ib_mr *mr, struct ib_udata *udata);
int mlx4_ib_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
int mlx4_ib_dealloc_mw(struct ib_mw *mw);
struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			       u32 max_num_sg);
int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset);
int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
int mlx4_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		      struct uverbs_attr_bundle *attrs);
int mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx4_ib_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);

int mlx4_ib_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
		      struct ib_udata *udata);
int mlx4_ib_create_ah_slave(struct ib_ah *ah, struct rdma_ah_attr *ah_attr,
			    int slave_sgid_index, u8 *s_mac, u16 vlan_tag);
int mlx4_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
static inline int mlx4_ib_destroy_ah(struct ib_ah *ah, u32 flags)
{
	return 0;
}

int mlx4_ib_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr,
		       struct ib_udata *udata);
int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx4_ib_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
int mlx4_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index);
int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr);

int mlx4_ib_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
		      struct ib_udata *udata);
int mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
void mlx4_ib_drain_sq(struct ib_qp *qp);
void mlx4_ib_drain_rq(struct ib_qp *qp);
int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr);
int mlx4_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		      const struct ib_send_wr **bad_wr);
int mlx4_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr);

int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
		 int port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		 const void *in_mad, void *response_mad);
int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad *in, struct ib_mad *out,
			size_t *out_mad_size, u16 *out_mad_pkey_index);
int mlx4_ib_mad_init(struct mlx4_ib_dev *dev);
void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev);

int __mlx4_ib_query_port(struct ib_device *ibdev, u32 port,
			 struct ib_port_attr *props, int netw_view);
int __mlx4_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
			 u16 *pkey, int netw_view);

int __mlx4_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
			union ib_gid *gid, int netw_view);

static inline bool mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah)
{
	u32 port = be32_to_cpu(ah->av.ib.port_pd) >> 24 & 3;

	if (rdma_port_get_link_layer(ah->ibah.device, port) == IB_LINK_LAYER_ETHERNET)
		return true;

	return !!(ah->av.ib.g_slid & 0x80);
}
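
/*
 * Note (summarizing the logic above): on an Ethernet (RoCE) port a
 * GRH is always present, so the function returns true unconditionally;
 * on an IB port the high bit of g_slid (0x80) carries the address
 * vector's GRH-present flag.
 */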

int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx);
void mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq);
void clean_vf_mcast(struct mlx4_ib_demux_ctx *ctx, int slave);
int mlx4_ib_mcg_init(void);
void mlx4_ib_mcg_destroy(void);

int mlx4_ib_find_real_gid(struct ib_device *ibdev, u32 port, __be64 guid);

int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port, int slave,
				  struct ib_sa_mad *sa_mad);
int mlx4_ib_mcg_demux_handler(struct ib_device *ibdev, int port, int slave,
			      struct ib_sa_mad *mad);

int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
		   union ib_gid *gid);

void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u32 port_num,
			    enum ib_event_type type);

void mlx4_ib_tunnels_update_work(struct work_struct *work);

int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u32 port,
			  enum ib_qp_type qpt, struct ib_wc *wc,
			  struct ib_grh *grh, struct ib_mad *mad);

int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u32 port,
			 enum ib_qp_type dest_qpt, u16 pkey_index, u32 remote_qpn,
			 u32 qkey, struct rdma_ah_attr *attr, u8 *s_mac,
			 u16 vlan_id, struct ib_mad *mad);

__be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx);

int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
		struct ib_mad *mad);

int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id,
		struct ib_mad *mad);

void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev);
void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave_id);

/* alias guid support */
void mlx4_ib_init_alias_guid_work(struct mlx4_ib_dev *dev, int port);
int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev);
void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev);
void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port);

void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
					  int block_num,
					  u32 port_num, u8 *p_data);

void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev,
					 int block_num, u32 port_num,
					 u8 *p_data);

int add_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
			    struct attribute *attr);
void del_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
			     struct attribute *attr);
ib_sa_comp_mask mlx4_ib_get_aguid_comp_mask_from_ix(int index);
void mlx4_ib_slave_alias_guid_event(struct mlx4_ib_dev *dev, int slave,
				    int port, int slave_init);

int mlx4_ib_device_register_sysfs(struct mlx4_ib_dev *device);

void mlx4_ib_device_unregister_sysfs(struct mlx4_ib_dev *device);

__be64 mlx4_ib_gen_node_guid(void);

int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn);
void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count);
int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
			 int is_attach);
struct ib_mr *mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags, u64 start,
				    u64 length, u64 virt_addr,
				    int mr_access_flags, struct ib_pd *pd,
				    struct ib_udata *udata);
int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
				    const struct ib_gid_attr *attr);

void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev,
				     int port);

void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port);

struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd,
				struct ib_wq_init_attr *init_attr,
				struct ib_udata *udata);
int mlx4_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
int mlx4_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		      u32 wq_attr_mask, struct ib_udata *udata);

int mlx4_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl,
				 struct ib_rwq_ind_table_init_attr *init_attr,
				 struct ib_udata *udata);
static inline int
mlx4_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table)
{
	return 0;
}
int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
				       int *num_of_mtts);

int mlx4_ib_cm_init(void);
void mlx4_ib_cm_destroy(void);

int mlx4_ib_qp_event_init(void);
void mlx4_ib_qp_event_cleanup(void);

#endif /* MLX4_IB_H */