/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/vport.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_pma.h>
#include "mlx5_ib.h"
#include "cmd.h"

enum {
	MLX5_IB_VENDOR_CLASS1 = 0x9,
	MLX5_IB_VENDOR_CLASS2 = 0xa
};

static bool can_do_mad_ifc(struct mlx5_ib_dev *dev, u32 port_num,
			   struct ib_mad *in_mad)
{
	if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED &&
	    in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		return true;
	return dev->port_caps[port_num - 1].has_smi;
}

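/*
 * Thin wrapper around the firmware MAD_IFC command.  A sketch of the
 * op_modifier semantics as used below (inferred from the ignore_mkey/
 * ignore_bkey arguments rather than quoted from a spec): bit 0
 * suppresses the M_Key check and bit 1 suppresses the B_Key check.
 * Both are forced on when no in_wc is available, since a key-violation
 * trap needs the work completion to identify where to send the trap.
 */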
static int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey,
			int ignore_bkey, u32 port, const struct ib_wc *in_wc,
			const struct ib_grh *in_grh, const void *in_mad,
			void *response_mad)
{
	u8 op_modifier = 0;

	if (!can_do_mad_ifc(dev, port, (struct ib_mad *)in_mad))
		return -EPERM;

	/* Key check traps can't be generated unless we have in_wc to
	 * tell us where to send the trap.
	 */
	if (ignore_mkey || !in_wc)
		op_modifier |= 0x1;
	if (ignore_bkey || !in_wc)
		op_modifier |= 0x2;

	return mlx5_cmd_mad_ifc(dev, in_mad, response_mad, op_modifier,
				port);
}

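/*
 * Fill an extended PortCounters reply from a QUERY_VPORT_COUNTER
 * response.  Note the unit conversion: per the IBA PMA definition,
 * PortXmitData/PortRcvData count 32-bit words, while the vport
 * counters report octets, hence the divide-by-four (e.g. 1024
 * transmitted octets are reported as 1024 >> 2 == 256).  Unicast and
 * multicast vport counters are summed to form the aggregate port
 * counters.
 */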
static void pma_cnt_ext_assign(struct ib_pma_portcounters_ext *pma_cnt_ext,
			       void *out)
{
#define MLX5_SUM_CNT(p, cntr1, cntr2)	\
	(MLX5_GET64(query_vport_counter_out, p, cntr1) + \
	MLX5_GET64(query_vport_counter_out, p, cntr2))

	pma_cnt_ext->port_xmit_data =
		cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.octets,
					 transmitted_ib_multicast.octets) >> 2);
	pma_cnt_ext->port_rcv_data =
		cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.octets,
					 received_ib_multicast.octets) >> 2);
	pma_cnt_ext->port_xmit_packets =
		cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.packets,
					 transmitted_ib_multicast.packets));
	pma_cnt_ext->port_rcv_packets =
		cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.packets,
					 received_ib_multicast.packets));
	pma_cnt_ext->port_unicast_xmit_packets =
		MLX5_GET64_BE(query_vport_counter_out,
			      out, transmitted_ib_unicast.packets);
	pma_cnt_ext->port_unicast_rcv_packets =
		MLX5_GET64_BE(query_vport_counter_out,
			      out, received_ib_unicast.packets);
	pma_cnt_ext->port_multicast_xmit_packets =
		MLX5_GET64_BE(query_vport_counter_out,
			      out, transmitted_ib_multicast.packets);
	pma_cnt_ext->port_multicast_rcv_packets =
		MLX5_GET64_BE(query_vport_counter_out,
			      out, received_ib_multicast.packets);
}

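/*
 * Fill a basic PortCounters reply from a PPCNT query made with the IB
 * port counters group.  MLX5_GET_BE sizes each load to the width of
 * the destination field (typeof(counter_var)), so the mix of 8-, 16-
 * and 32-bit PMA counters is copied without truncation.
 */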
static void pma_cnt_assign(struct ib_pma_portcounters *pma_cnt,
			   void *out)
{
	/* Traffic counters are reported in their 64-bit form via
	 * ib_pma_portcounters_ext by default.
	 */
	void *out_pma = MLX5_ADDR_OF(ppcnt_reg, out,
				     counter_set);

#define MLX5_ASSIGN_PMA_CNTR(counter_var, counter_name)	{		\
	counter_var = MLX5_GET_BE(typeof(counter_var),			\
				  ib_port_cntrs_grp_data_layout,	\
				  out_pma, counter_name);		\
	}

	MLX5_ASSIGN_PMA_CNTR(pma_cnt->symbol_error_counter,
			     symbol_error_counter);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_error_recovery_counter,
			     link_error_recovery_counter);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_downed_counter,
			     link_downed_counter);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_errors,
			     port_rcv_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_remphys_errors,
			     port_rcv_remote_physical_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_switch_relay_errors,
			     port_rcv_switch_relay_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_xmit_discards,
			     port_xmit_discards);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_xmit_constraint_errors,
			     port_xmit_constraint_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_xmit_wait,
			     port_xmit_wait);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_constraint_errors,
			     port_rcv_constraint_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_overrun_errors,
			     link_overrun_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->vl15_dropped,
			     vl_15_dropped);
}

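/*
 * Fill an extended PortCounters reply from a PPCNT query made with the
 * IB extended port counters group.  In this register layout each
 * 64-bit counter is split into adjacent _high/_low dwords, so a 64-bit
 * load anchored at the _high field returns the whole counter value.
 */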
static void pma_cnt_ext_assign_ppcnt(struct ib_pma_portcounters_ext *cnt_ext,
				     void *out)
{
	void *out_pma = MLX5_ADDR_OF(ppcnt_reg, out,
				     counter_set);

#define MLX5_GET_EXT_CNTR(counter_name)			\
	MLX5_GET64(ib_ext_port_cntrs_grp_data_layout,	\
		   out_pma, counter_name##_high)

	cnt_ext->port_xmit_data =
		cpu_to_be64(MLX5_GET_EXT_CNTR(port_xmit_data) >> 2);
	cnt_ext->port_rcv_data =
		cpu_to_be64(MLX5_GET_EXT_CNTR(port_rcv_data) >> 2);

	cnt_ext->port_xmit_packets =
		cpu_to_be64(MLX5_GET_EXT_CNTR(port_xmit_pkts));
	cnt_ext->port_rcv_packets =
		cpu_to_be64(MLX5_GET_EXT_CNTR(port_rcv_pkts));

	cnt_ext->port_unicast_xmit_packets =
		cpu_to_be64(MLX5_GET_EXT_CNTR(port_unicast_xmit_pkts));
	cnt_ext->port_unicast_rcv_packets =
		cpu_to_be64(MLX5_GET_EXT_CNTR(port_unicast_rcv_pkts));

	cnt_ext->port_multicast_xmit_packets =
		cpu_to_be64(MLX5_GET_EXT_CNTR(port_multicast_xmit_pkts));
	cnt_ext->port_multicast_rcv_packets =
		cpu_to_be64(MLX5_GET_EXT_CNTR(port_multicast_rcv_pkts));
}

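/*
 * Query the PPCNT access register for one of the two IB counter
 * groups.  plane_num fills the plane_ind field; as used by the caller
 * it is only non-zero for SMI-type (planarized) devices, where it
 * selects the plane port.
 */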
static int query_ib_ppcnt(struct mlx5_core_dev *dev, u8 port_num, u8 plane_num,
			  void *out, size_t sz, bool ext)
{
	u32 *in;
	int err;

	in = kvzalloc(sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(ppcnt_reg, in, local_port, port_num);
	MLX5_SET(ppcnt_reg, in, plane_ind, plane_num);

	if (ext)
		MLX5_SET(ppcnt_reg, in, grp,
			 MLX5_INFINIBAND_EXTENDED_PORT_COUNTERS_GROUP);
	else
		MLX5_SET(ppcnt_reg, in, grp,
			 MLX5_INFINIBAND_PORT_COUNTERS_GROUP);
	err = mlx5_core_access_reg(dev, in, sz, out,
				   sz, MLX5_REG_PPCNT, 0, 0);

	kvfree(in);
	return err;
}

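/*
 * Handle a performance-management GET.  Replies are written at
 * out_mad->data + 40: the MAD header occupies the first 24 bytes of
 * the 256-byte MAD and PMA attribute data starts at byte 64 of the
 * MAD, i.e. 40 bytes into the data area.
 */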
static int process_pma_cmd(struct mlx5_ib_dev *dev, u32 port_num,
			   const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	struct mlx5_core_dev *mdev;
	bool native_port = true;
	u32 mdev_port_num;
	void *out_cnt;
	int err;

	mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
	if (!mdev) {
		/* Failed to get the native port, likely because the 2nd
		 * port is still unaffiliated. In that case, default to the
		 * 1st port and the attached PF device.
		 */
		native_port = false;
		mdev = dev->mdev;
		mdev_port_num = 1;
	}
	if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1 &&
	    !mlx5_core_mp_enabled(mdev) &&
	    dev->ib_dev.type != RDMA_DEVICE_TYPE_SMI) {
		/* Set the local port to one for a Function-Per-Port HCA. */
		mdev = dev->mdev;
		mdev_port_num = 1;
	}

	/* Declare support of extended counters */
	if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) {
		struct ib_class_port_info cpi = {};

		cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
		memcpy((out_mad->data + 40), &cpi, sizeof(cpi));
		err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
		goto done;
	}

	if (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT) {
		struct ib_pma_portcounters_ext *pma_cnt_ext =
			(struct ib_pma_portcounters_ext *)(out_mad->data + 40);
		int sz = max(MLX5_ST_SZ_BYTES(query_vport_counter_out),
			     MLX5_ST_SZ_BYTES(ppcnt_reg));

		out_cnt = kvzalloc(sz, GFP_KERNEL);
		if (!out_cnt) {
			err = IB_MAD_RESULT_FAILURE;
			goto done;
		}

		if (dev->ib_dev.type == RDMA_DEVICE_TYPE_SMI) {
			err = query_ib_ppcnt(mdev, mdev_port_num,
					     port_num, out_cnt, sz, true);
			if (!err)
				pma_cnt_ext_assign_ppcnt(pma_cnt_ext, out_cnt);
		} else {
			err = mlx5_core_query_vport_counter(mdev, 0, 0,
							    mdev_port_num,
							    out_cnt);
			if (!err)
				pma_cnt_ext_assign(pma_cnt_ext, out_cnt);
		}
	} else {
		struct ib_pma_portcounters *pma_cnt =
			(struct ib_pma_portcounters *)(out_mad->data + 40);
		int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

		out_cnt = kvzalloc(sz, GFP_KERNEL);
		if (!out_cnt) {
			err = IB_MAD_RESULT_FAILURE;
			goto done;
		}

		err = query_ib_ppcnt(mdev, mdev_port_num, 0, out_cnt, sz, false);
		if (!err)
			pma_cnt_assign(pma_cnt, out_cnt);
	}
	kvfree(out_cnt);
	err = err ? IB_MAD_RESULT_FAILURE :
		    IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
done:
	if (native_port)
		mlx5_ib_put_native_port_mdev(dev, port_num);
	return err;
}

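/*
 * Central MAD dispatcher for the device.  Trap MADs arriving with a
 * zero source LID are locally generated and are simply consumed;
 * everything else is filtered by management class and method before
 * being handed to the firmware MAD_IFC path.
 */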
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad *in, struct ib_mad *out,
			size_t *out_mad_size, u16 *out_mad_pkey_index)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	u8 mgmt_class = in->mad_hdr.mgmt_class;
	u8 method = in->mad_hdr.method;
	u16 slid;
	int err;

	slid = in_wc ? ib_lid_cpu16(in_wc->slid) :
		       be16_to_cpu(IB_LID_PERMISSIVE);

	if (method == IB_MGMT_METHOD_TRAP && !slid)
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	switch (mgmt_class) {
	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: {
		if (method != IB_MGMT_METHOD_GET &&
		    method != IB_MGMT_METHOD_SET &&
		    method != IB_MGMT_METHOD_TRAP_REPRESS)
			return IB_MAD_RESULT_SUCCESS;

		/* Don't process SMInfo queries -- the SMA can't handle them.
		 */
		if (in->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
			return IB_MAD_RESULT_SUCCESS;
	} break;
	case IB_MGMT_CLASS_PERF_MGMT:
		if (MLX5_CAP_GEN(dev->mdev, vport_counters) &&
		    method == IB_MGMT_METHOD_GET)
			return process_pma_cmd(dev, port_num, in, out);
		fallthrough;
	case MLX5_IB_VENDOR_CLASS1:
	case MLX5_IB_VENDOR_CLASS2:
	case IB_MGMT_CLASS_CONG_MGMT: {
		if (method != IB_MGMT_METHOD_GET &&
		    method != IB_MGMT_METHOD_SET)
			return IB_MAD_RESULT_SUCCESS;
	} break;
	default:
		return IB_MAD_RESULT_SUCCESS;
	}

	err = mlx5_MAD_IFC(to_mdev(ibdev), mad_flags & IB_MAD_IGNORE_MKEY,
			   mad_flags & IB_MAD_IGNORE_BKEY, port_num, in_wc,
			   in_grh, in, out);
	if (err)
		return IB_MAD_RESULT_FAILURE;

	/* Set the return bit in the status of directed route responses */
	if (mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		out->mad_hdr.status |= cpu_to_be16(1 << 15);

	if (method == IB_MGMT_METHOD_TRAP_REPRESS)
		/* No response for trap repress */
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

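/*
 * Probe the (vendor-specific) ExtendedPortInfo SMP attribute.  If the
 * query succeeds with a clean MAD status, the port is flagged as
 * supporting it; mlx5_query_mad_ifc_port() later uses this flag to
 * detect FDR-10 links that report themselves as QDR.
 */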
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, unsigned int port)
{
	struct ib_smp *in_mad;
	struct ib_smp *out_mad;
	int err = -ENOMEM;
	u16 packet_error;

	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	ib_init_query_mad(in_mad);
	in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);

	packet_error = be16_to_cpu(out_mad->status);

	dev->port_caps[port - 1].ext_port_cap = (!err && !packet_error) ?
		MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO : 0;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

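/*
 * The helpers below parse fields out of the SMP NodeInfo attribute.
 * The offsets into out_mad->data follow the IBA NodeInfo layout:
 * SystemImageGUID at byte 4, NodeGUID at byte 12, PartitionCap (the
 * P_Key table size) at byte 28 and the VendorID in the low bytes of
 * the dword at byte 36.
 */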
static int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
						 struct ib_smp *out_mad)
{
	struct ib_smp *in_mad;
	int err;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	if (!in_mad)
		return -ENOMEM;

	ib_init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad,
			   out_mad);

	kfree(in_mad);
	return err;
}

int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
					 __be64 *sys_image_guid)
{
	struct ib_smp *out_mad;
	int err;

	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!out_mad)
		return -ENOMEM;

	err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
	if (err)
		goto out;

	memcpy(sys_image_guid, out_mad->data + 4, 8);

out:
	kfree(out_mad);

	return err;
}

int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
				 u16 *max_pkeys)
{
	struct ib_smp *out_mad;
	int err;

	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!out_mad)
		return -ENOMEM;

	err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
	if (err)
		goto out;

	*max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28));

out:
	kfree(out_mad);

	return err;
}

int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
				 u32 *vendor_id)
{
	struct ib_smp *out_mad;
	int err;

	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!out_mad)
		return -ENOMEM;

	err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
	if (err)
		goto out;

	*vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) & 0xffff;

out:
	kfree(out_mad);

	return err;
}

int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
{
	struct ib_smp *in_mad;
	struct ib_smp *out_mad;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	ib_init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;

	err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);
out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid)
{
	struct ib_smp *in_mad;
	struct ib_smp *out_mad;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	ib_init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(node_guid, out_mad->data + 12, 8);
out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

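/*
 * The SMP PKeyTable attribute returns a block of 32 P_Keys per query,
 * so the requested index is split into a block number (passed in
 * attr_mod) and an offset within the returned block.
 */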
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u32 port, u16 index,
			    u16 *pkey)
{
	struct ib_smp *in_mad;
	struct ib_smp *out_mad;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	ib_init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
			   out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *)out_mad->data)[index % 32]);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

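/*
 * A GID is assembled from two SMP queries: bytes 0-7 (the subnet
 * prefix) come from the PortInfo attribute, and bytes 8-15 (the port
 * GUID) come from the GUIDInfo attribute, which returns blocks of
 * eight GUIDs per query.
 */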
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u32 port, int index,
			    union ib_gid *gid)
{
	struct ib_smp *in_mad;
	struct ib_smp *out_mad;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	ib_init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
			   out_mad);
	if (err)
		goto out;

	memcpy(gid->raw, out_mad->data + 8, 8);

	ib_init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
			   out_mad);
	if (err)
		goto out;

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

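/*
 * Decode a port attributes query from the SMP PortInfo attribute.
 * The byte offsets into out_mad->data below follow the IBA PortInfo
 * layout (e.g. LID at byte 16, SMLID at byte 18, CapabilityMask at
 * byte 20); the extended speed handling afterwards widens
 * active_speed when the relevant capability bits are set.
 */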
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u32 port,
			    struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct ib_smp *in_mad;
	struct ib_smp *out_mad;
	int ext_active_speed;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	/* props is zeroed by the caller, avoid zeroing it here */

	ib_init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx5_MAD_IFC(dev, 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err) {
		mlx5_ib_warn(dev, "err %d\n", err);
		goto out;
	}

	props->lid		= be16_to_cpup((__be16 *)(out_mad->data + 16));
	props->lmc		= out_mad->data[34] & 0x7;
	props->sm_lid		= be16_to_cpup((__be16 *)(out_mad->data + 18));
	props->sm_sl		= out_mad->data[36] & 0xf;
	props->state		= out_mad->data[32] & 0xf;
	props->phys_state	= out_mad->data[33] >> 4;
	props->port_cap_flags	= be32_to_cpup((__be32 *)(out_mad->data + 20));
	props->gid_tbl_len	= out_mad->data[50];
	props->max_msg_sz	= 1 << MLX5_CAP_GEN(mdev, log_max_msg);
	props->pkey_tbl_len	= dev->pkey_table_len;
	props->bad_pkey_cntr	= be16_to_cpup((__be16 *)(out_mad->data + 46));
	props->qkey_viol_cntr	= be16_to_cpup((__be16 *)(out_mad->data + 48));
	props->active_width	= out_mad->data[31] & 0xf;
	props->active_speed	= out_mad->data[35] >> 4;
	props->max_mtu		= out_mad->data[41] & 0xf;
	props->active_mtu	= out_mad->data[36] >> 4;
	props->subnet_timeout	= out_mad->data[51] & 0x1f;
	props->max_vl_num	= out_mad->data[37] >> 4;
	props->init_type_reply	= out_mad->data[41] >> 4;

	if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP) {
		props->port_cap_flags2 =
			be16_to_cpup((__be16 *)(out_mad->data + 60));

		if (props->port_cap_flags2 & IB_PORT_LINK_WIDTH_2X_SUP)
			props->active_width = out_mad->data[31] & 0x1f;
	}

	/* Check if extended speeds (EDR/FDR/...) are supported */
	if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
		ext_active_speed = out_mad->data[62] >> 4;

		switch (ext_active_speed) {
		case 1:
			props->active_speed = 16; /* FDR */
			break;
		case 2:
			props->active_speed = 32; /* EDR */
			break;
		case 4:
			if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP &&
			    props->port_cap_flags2 & IB_PORT_LINK_SPEED_HDR_SUP)
				props->active_speed = IB_SPEED_HDR;
			break;
		case 8:
			if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP &&
			    props->port_cap_flags2 & IB_PORT_LINK_SPEED_NDR_SUP)
				props->active_speed = IB_SPEED_NDR;
			break;
		}
	}

	/* Check if extended speeds 2 (XDR/...) are supported */
	if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP &&
	    props->port_cap_flags2 & IB_PORT_EXTENDED_SPEEDS2_SUP) {
		ext_active_speed = (out_mad->data[56] >> 4) & 0x6;

		switch (ext_active_speed) {
		case 2:
			if (props->port_cap_flags2 & IB_PORT_LINK_SPEED_XDR_SUP)
				props->active_speed = IB_SPEED_XDR;
			break;
		}
	}

	/* If the reported active speed is QDR, check whether it is FDR-10 */
	if (props->active_speed == 4) {
		if (dev->port_caps[port - 1].ext_port_cap &
		    MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
			ib_init_query_mad(in_mad);
			in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
			in_mad->attr_mod = cpu_to_be32(port);

			err = mlx5_MAD_IFC(dev, 1, 1, port,
					   NULL, NULL, in_mad, out_mad);
			if (err)
				goto out;

			/* Check LinkSpeedActive for FDR-10 */
			if (out_mad->data[15] & 0x1)
				props->active_speed = 8;
		}
	}

out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}