1  /*
2   * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
3   *
4   * This software is available to you under a choice of one of two
5   * licenses.  You may choose to be licensed under the terms of the GNU
6   * General Public License (GPL) Version 2, available from the file
7   * COPYING in the main directory of this source tree, or the
8   * OpenIB.org BSD license below:
9   *
10   *     Redistribution and use in source and binary forms, with or
11   *     without modification, are permitted provided that the following
12   *     conditions are met:
13   *
14   *      - Redistributions of source code must retain the above
15   *        copyright notice, this list of conditions and the following
16   *        disclaimer.
17   *
18   *      - Redistributions in binary form must reproduce the above
19   *        copyright notice, this list of conditions and the following
20   *        disclaimer in the documentation and/or other materials
21   *        provided with the distribution.
22   *
23   * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24   * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25   * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26   * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27   * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28   * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29   * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30   * SOFTWARE.
31   */
32  
33  #include <linux/export.h>
34  #include <linux/etherdevice.h>
35  #include <linux/mlx5/driver.h>
36  #include <linux/mlx5/vport.h>
37  #include <linux/mlx5/eswitch.h>
38  #include "mlx5_core.h"
39  #include "sf/sf.h"
40  
/* Serializes RoCE enable/disable and protects the mdev->roce.roce_en
 * reference count manipulated in mlx5_nic_vport_enable_roce().
 */
static DEFINE_MUTEX(mlx5_roce_en_lock);
43  
/* Read the current state of a vport.
 *
 * Returns the state field from QUERY_VPORT_STATE, or 0 if the command
 * fails (the error itself is not propagated to the caller).
 */
u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
{
	u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {};

	MLX5_SET(query_vport_state_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_STATE);
	MLX5_SET(query_vport_state_in, in, op_mod, opmod);
	MLX5_SET(query_vport_state_in, in, vport_number, vport);
	/* a non-zero vport number means we are asking about another vport */
	MLX5_SET(query_vport_state_in, in, other_vport, !!vport);

	if (mlx5_cmd_exec_inout(mdev, query_vport_state, in, out))
		return 0;

	return MLX5_GET(query_vport_state_out, out, state);
}
63  
/* Set the administrative state of a vport (own or other, per @other_vport). */
int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
				  u16 vport, u8 other_vport, u8 state)
{
	u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {};

	MLX5_SET(modify_vport_state_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_VPORT_STATE);
	MLX5_SET(modify_vport_state_in, in, admin_state, state);
	MLX5_SET(modify_vport_state_in, in, other_vport, other_vport);
	MLX5_SET(modify_vport_state_in, in, vport_number, vport);
	MLX5_SET(modify_vport_state_in, in, op_mod, opmod);

	return mlx5_cmd_exec_in(mdev, modify_vport_state, in);
}
78  
/* Fetch the NIC vport context of @vport into the caller-sized @out buffer. */
static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
					u32 *out)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {};

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	/* a non-zero vport number means we query on behalf of another vport */
	MLX5_SET(query_nic_vport_context_in, in, other_vport, !!vport);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);

	return mlx5_cmd_exec_inout(mdev, query_nic_vport_context, in, out);
}
92  
/* Read the minimum WQE inline mode stored in @vport's NIC vport context. */
int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
				    u16 vport, u8 *min_inline)
{
	u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {};
	int err;

	err = mlx5_query_nic_vport_context(mdev, vport, out);
	if (err)
		return err;

	*min_inline = MLX5_GET(query_nic_vport_context_out, out,
			       nic_vport_context.min_wqe_inline_mode);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline);
106  
/* Resolve the minimum WQE inline mode required for transmit.
 *
 * Depending on the wqe_inline_mode capability, the mode is taken from the
 * NIC vport context, fixed at L2, or not required at all.  NOTE(review):
 * if the capability value matches none of the cases, *min_inline_mode is
 * left untouched — callers presumably pre-initialize it; confirm.
 */
void mlx5_query_min_inline(struct mlx5_core_dev *mdev,
			   u8 *min_inline_mode)
{
	switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		/* ask firmware; on failure fall through to the L2 default */
		if (!mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode))
			break;
		fallthrough;
	case MLX5_CAP_INLINE_MODE_L2:
		*min_inline_mode = MLX5_INLINE_MODE_L2;
		break;
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		*min_inline_mode = MLX5_INLINE_MODE_NONE;
		break;
	}
}
EXPORT_SYMBOL_GPL(mlx5_query_min_inline);
124  
/* Program the minimum WQE inline mode of another vport. */
int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
				     u16 vport, u8 min_inline)
{
	u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {};
	void *ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
				 nic_vport_context);

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in, field_select.min_inline, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
	MLX5_SET(nic_vport_context, ctx, min_wqe_inline_mode, min_inline);

	return mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
}
145  
/* Read the permanent MAC address of @vport (@other selects other_vport). */
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
				     u16 vport, bool other, u8 *addr)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {};
	u8 *mac;
	int err;

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(query_nic_vport_context_in, in, other_vport, other);

	err = mlx5_cmd_exec_inout(mdev, query_nic_vport_context, in, out);
	if (err)
		return err;

	mac = MLX5_ADDR_OF(query_nic_vport_context_out, out,
			   nic_vport_context.permanent_address);
	/* the 6-byte MAC sits in the low bytes of the 8-byte field */
	ether_addr_copy(addr, mac + 2);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);
169  
/* Convenience wrapper: query the permanent MAC of our own vport (0). */
int mlx5_query_mac_address(struct mlx5_core_dev *mdev, u8 *addr)
{
	return mlx5_query_nic_vport_mac_address(mdev, 0, false, addr);
}
EXPORT_SYMBOL_GPL(mlx5_query_mac_address);
175  
/* Set the permanent MAC address of another vport. */
int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
				      u16 vport, const u8 *addr)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *ctx;
	void *in;
	u8 *mac;
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.permanent_address, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);
	mac = MLX5_ADDR_OF(nic_vport_context, ctx, permanent_address);
	/* the 6-byte MAC goes in the low bytes of the 8-byte field */
	ether_addr_copy(mac + 2, addr);

	err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address);
210  
/* Read the MTU stored in our own NIC vport context. */
int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
{
	u32 *out;
	int err;

	out = kvzalloc(MLX5_ST_SZ_BYTES(query_nic_vport_context_out),
		       GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out);
	if (!err)
		*mtu = MLX5_GET(query_nic_vport_context_out, out,
				nic_vport_context.mtu);

	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mtu);
230  
/* Program the MTU in our own NIC vport context. */
int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
{
	void *in;
	int err;

	in = kvzalloc(MLX5_ST_SZ_BYTES(modify_nic_vport_context_in),
		      GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);

	err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu);
252  
/* Read the UC/MC MAC allowed-list of @vport.
 *
 * On entry *list_size is the caller's capacity in entries; it is clamped
 * to the device limit with a warning.  On success *list_size is updated
 * to the number of addresses firmware reported and addr_list[] holds them.
 */
int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
				  u16 vport,
				  enum mlx5_list_type list_type,
				  u8 addr_list[][ETH_ALEN],
				  int *list_size)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
	void *nic_vport_ctx;
	int max_list_size;
	int req_list_size;
	int out_sz;
	void *out;
	int err;
	int i;

	req_list_size = *list_size;

	/* device capability bounds the list length we may request */
	max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ?
		1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);

	if (req_list_size > max_list_size) {
		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
			       req_list_size, max_list_size);
		req_list_size = max_list_size;
	}

	/* output holds the fixed context plus one address layout per entry */
	out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_out) +
			req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

	out = kvzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
	/* the ECPF addresses vports as "other" even for vport 0 */
	if (vport || mlx5_core_is_ecpf(dev))
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto out;

	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				     nic_vport_context);
	/* firmware reports how many entries it actually returned */
	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
				 allowed_list_size);

	*list_size = req_list_size;
	for (i = 0; i < req_list_size; i++) {
		/* +2: the MAC occupies the low 6 bytes of the 8-byte layout */
		u8 *mac_addr = MLX5_ADDR_OF(nic_vport_context,
					nic_vport_ctx,
					current_uc_mac_address[i]) + 2;
		ether_addr_copy(addr_list[i], mac_addr);
	}
out:
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list);
315  
/* Replace the UC/MC MAC allowed-list of our own vport.
 *
 * Returns -ENOSPC when @list_size exceeds the device capability limit.
 */
int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
				   enum mlx5_list_type list_type,
				   u8 addr_list[][ETH_ALEN],
				   int list_size)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {};
	void *nic_vport_ctx;
	int max_list_size;
	int in_sz;
	void *in;
	int err;
	int i;

	/* device capability bounds the number of entries we may program */
	max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ?
		 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
		 1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);

	if (list_size > max_list_size)
		return -ENOSPC;

	/* input carries the fixed context plus one MAC layout per entry */
	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

	in = kvzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
				     nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_type, list_type);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_size, list_size);

	for (i = 0; i < list_size; i++) {
		/* +2: the MAC occupies the low 6 bytes of the 8-byte layout */
		u8 *curr_mac = MLX5_ADDR_OF(nic_vport_context,
					    nic_vport_ctx,
					    current_uc_mac_address[i]) + 2;
		ether_addr_copy(curr_mac, addr_list[i]);
	}

	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);
368  
/* Replace the VLAN allowed-list of our own vport.
 *
 * Cleanup: zero-initialize the output mailbox at declaration (= {})
 * instead of a separate memset() later, matching the convention used by
 * every other command wrapper in this file.
 *
 * Returns -ENOSPC when @list_size exceeds the device capability limit.
 */
int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
				u16 vlans[],
				int list_size)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {};
	void *nic_vport_ctx;
	int max_list_size;
	int in_sz;
	void *in;
	int err;
	int i;

	max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);

	if (list_size > max_list_size)
		return -ENOSPC;

	/* input carries the fixed context plus one vlan layout per entry */
	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		list_size * MLX5_ST_SZ_BYTES(vlan_layout);

	in = kvzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
				     nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_type, MLX5_NVPRT_LIST_TYPE_VLAN);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_size, list_size);

	for (i = 0; i < list_size; i++) {
		/* VLAN entries reuse the current_uc_mac_address slots, each
		 * written as a vlan_layout
		 */
		void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
					       nic_vport_ctx,
					       current_uc_mac_address[i]);
		MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]);
	}

	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans);
419  
/* Read the system image GUID from our own NIC vport context. */
int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
					   u64 *system_image_guid)
{
	u32 *out;
	int err;

	out = kvzalloc(MLX5_ST_SZ_BYTES(query_nic_vport_context_out),
		       GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out);
	if (!err)
		*system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
						nic_vport_context.system_image_guid);

	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);
442  
/* Read the sd_group field from our own NIC vport context. */
int mlx5_query_nic_vport_sd_group(struct mlx5_core_dev *mdev, u8 *sd_group)
{
	u32 *out;
	int err;

	out = kvzalloc(MLX5_ST_SZ_BYTES(query_nic_vport_context_out),
		       GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out);
	if (!err)
		*sd_group = MLX5_GET(query_nic_vport_context_out, out,
				     nic_vport_context.sd_group);

	kvfree(out);
	return err;
}
463  
/* Read the node GUID from our own NIC vport context.
 *
 * Fix: propagate the status of mlx5_query_nic_vport_context() instead of
 * ignoring it — previously a failed query returned 0 (success) with
 * *node_guid read from a zeroed buffer.  Mirrors the error handling of
 * the other query helpers in this file.
 */
int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
{
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u32 *out;
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out);
	if (!err)
		*node_guid = MLX5_GET64(query_nic_vport_context_out, out,
					nic_vport_context.node_guid);

	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
483  
/* Program the node GUID of another vport.
 *
 * Always targets an "other" vport, so the caller must be the vport group
 * manager; -EACCES otherwise.
 */
int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
				    u16 vport, u64 node_guid)
{
	void *ctx;
	void *in;
	int err;

	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
		return -EACCES;

	in = kvzalloc(MLX5_ST_SZ_BYTES(modify_nic_vport_context_in),
		      GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in, field_select.node_guid, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);
	MLX5_SET64(nic_vport_context, ctx, node_guid, node_guid);

	err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
	kvfree(in);
	return err;
}
516  
/* Read the Q_Key violation counter from our own NIC vport context.
 *
 * Fix: propagate the status of mlx5_query_nic_vport_context() instead of
 * ignoring it — previously a failed query returned 0 (success) with the
 * counter read from a zeroed buffer.
 */
int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
					u16 *qkey_viol_cntr)
{
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u32 *out;
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out);
	if (!err)
		*qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
					   nic_vport_context.qkey_violation_counter);

	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);
537  
/* Query GID table entries of an HCA vport.
 *
 * @gid_index selects a single entry; 0xffff requests the whole table
 * (note only the first returned entry is copied into @gid).  Querying
 * another vport requires the vport_group_manager capability (-EPERM
 * otherwise).
 */
int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
			     u8 port_num, u16  vf_num, u16 gid_index,
			     union ib_gid *gid)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
	int is_group_manager;
	void *out = NULL;
	void *in = NULL;
	union ib_gid *tmp;
	int tbsz;
	int nout;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));
	mlx5_core_dbg(dev, "vf_num %d, index %d, gid_table_size %d\n",
		      vf_num, gid_index, tbsz);

	/* NOTE(review): '>' permits gid_index == tbsz, one past the last
	 * valid entry; looks like it should be '>=' — confirm before changing.
	 */
	if (gid_index > tbsz && gid_index != 0xffff)
		return -EINVAL;

	if (gid_index == 0xffff)
		nout = tbsz;
	else
		nout = 1;

	/* room for the requested number of GID entries after the header */
	out_sz += nout * sizeof(*gid);

	in = kvzalloc(in_sz, GFP_KERNEL);
	out = kvzalloc(out_sz, GFP_KERNEL);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_hca_vport_gid_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_gid_in, in, vport_number, vf_num);
			MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
		} else {
			/* only the group manager may query other vports */
			err = -EPERM;
			goto out;
		}
	}
	MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);

	/* port_num is only meaningful on dual-port devices */
	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto out;

	/* GID data starts right after the fixed-size output header */
	tmp = out + MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
	gid->global.subnet_prefix = tmp->global.subnet_prefix;
	gid->global.interface_id = tmp->global.interface_id;

out:
	kvfree(in);
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);
603  
/* Query P_Key table entries of an HCA vport.
 *
 * @pkey_index selects a single entry; 0xffff requests the whole table, in
 * which case all entries are copied into @pkey (caller must size the
 * array accordingly).  Querying another vport requires the
 * vport_group_manager capability (-EPERM otherwise).
 */
int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
			      u8 port_num, u16 vf_num, u16 pkey_index,
			      u16 *pkey)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
	int is_group_manager;
	void *out = NULL;
	void *in = NULL;
	void *pkarr;
	int nout;
	int tbsz;
	int err;
	int i;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

	tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
	/* NOTE(review): '>' permits pkey_index == tbsz — likely should be
	 * '>=' (mirrors the GID query above); confirm before changing.
	 */
	if (pkey_index > tbsz && pkey_index != 0xffff)
		return -EINVAL;

	if (pkey_index == 0xffff)
		nout = tbsz;
	else
		nout = 1;

	/* room for the requested number of pkey entries after the header */
	out_sz += nout * MLX5_ST_SZ_BYTES(pkey);

	in = kvzalloc(in_sz, GFP_KERNEL);
	out = kvzalloc(out_sz, GFP_KERNEL);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_hca_vport_pkey_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_pkey_in, in, vport_number, vf_num);
			MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
		} else {
			/* only the group manager may query other vports */
			err = -EPERM;
			goto out;
		}
	}
	MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);

	/* port_num is only meaningful on dual-port devices */
	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto out;

	/* copy each returned pkey entry into the caller's array */
	pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
	for (i = 0; i < nout; i++, pkey++, pkarr += MLX5_ST_SZ_BYTES(pkey))
		*pkey = MLX5_GET_PR(pkey, pkarr, pkey);

out:
	kvfree(in);
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);
668  
/* Read the HCA vport context of @vf_num (or our own, if !other_vport)
 * into @rep.
 *
 * Querying another vport requires the vport_group_manager capability
 * (-EPERM otherwise).  @port_num is only meaningful on dual-port devices.
 *
 * Cleanup: declare the command mailbox as u32, matching every other
 * MLX5_ST_SZ_DW-sized buffer in this file (it was 'int', same size on
 * kernel targets but inconsistent).
 */
int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
				 u8 other_vport, u8 port_num,
				 u16 vf_num,
				 struct mlx5_hca_vport_context *rep)
{
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
	u32 in[MLX5_ST_SZ_DW(query_hca_vport_context_in)] = {};
	int is_group_manager;
	void *out;
	void *ctx;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

	out = kvzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_hca_vport_context_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);

	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_context_in, in, other_vport, 1);
			MLX5_SET(query_hca_vport_context_in, in, vport_number, vf_num);
		} else {
			err = -EPERM;
			goto ex;
		}
	}

	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);

	err = mlx5_cmd_exec_inout(dev, query_hca_vport_context, in, out);
	if (err)
		goto ex;

	/* unpack the firmware context into the caller's representation */
	ctx = MLX5_ADDR_OF(query_hca_vport_context_out, out, hca_vport_context);
	rep->field_select = MLX5_GET_PR(hca_vport_context, ctx, field_select);
	rep->sm_virt_aware = MLX5_GET_PR(hca_vport_context, ctx, sm_virt_aware);
	rep->has_smi = MLX5_GET_PR(hca_vport_context, ctx, has_smi);
	rep->has_raw = MLX5_GET_PR(hca_vport_context, ctx, has_raw);
	rep->policy = MLX5_GET_PR(hca_vport_context, ctx, vport_state_policy);
	/* phys_state and port_physical_state both read the same FW field */
	rep->phys_state = MLX5_GET_PR(hca_vport_context, ctx,
				      port_physical_state);
	rep->vport_state = MLX5_GET_PR(hca_vport_context, ctx, vport_state);
	rep->port_physical_state = MLX5_GET_PR(hca_vport_context, ctx,
					       port_physical_state);
	rep->port_guid = MLX5_GET64_PR(hca_vport_context, ctx, port_guid);
	rep->node_guid = MLX5_GET64_PR(hca_vport_context, ctx, node_guid);
	rep->cap_mask1 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask1);
	rep->cap_mask1_perm = MLX5_GET_PR(hca_vport_context, ctx,
					  cap_mask1_field_select);
	rep->cap_mask2 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask2);
	rep->cap_mask2_perm = MLX5_GET_PR(hca_vport_context, ctx,
					  cap_mask2_field_select);
	rep->lid = MLX5_GET_PR(hca_vport_context, ctx, lid);
	rep->init_type_reply = MLX5_GET_PR(hca_vport_context, ctx,
					   init_type_reply);
	rep->lmc = MLX5_GET_PR(hca_vport_context, ctx, lmc);
	rep->subnet_timeout = MLX5_GET_PR(hca_vport_context, ctx,
					  subnet_timeout);
	rep->sm_lid = MLX5_GET_PR(hca_vport_context, ctx, sm_lid);
	rep->sm_sl = MLX5_GET_PR(hca_vport_context, ctx, sm_sl);
	rep->qkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
						  qkey_violation_counter);
	rep->pkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
						  pkey_violation_counter);
	rep->grh_required = MLX5_GET_PR(hca_vport_context, ctx, grh_required);
	rep->sys_image_guid = MLX5_GET64_PR(hca_vport_context, ctx,
					    system_image_guid);
	rep->num_plane = MLX5_GET_PR(hca_vport_context, ctx, num_port_plane);

ex:
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_context);
747  
/* Read the system image GUID via our own HCA vport context (port 1). */
int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
					   u64 *sys_image_guid)
{
	struct mlx5_hca_vport_context *ctx;
	int err;

	ctx = kvzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	err = mlx5_query_hca_vport_context(dev, 0, 1, 0, ctx);
	if (!err)
		*sys_image_guid = ctx->sys_image_guid;

	kvfree(ctx);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);
766  
/* Read the node GUID via our own HCA vport context (port 1). */
int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
				   u64 *node_guid)
{
	struct mlx5_hca_vport_context *ctx;
	int err;

	ctx = kvzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	err = mlx5_query_hca_vport_context(dev, 0, 1, 0, ctx);
	if (!err)
		*node_guid = ctx->node_guid;

	kvfree(ctx);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);
785  
/* Read the three promiscuous-mode flags of @vport's NIC vport context. */
int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
				 u16 vport,
				 int *promisc_uc,
				 int *promisc_mc,
				 int *promisc_all)
{
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u32 *out;
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, vport, out);
	if (!err) {
		*promisc_uc = MLX5_GET(query_nic_vport_context_out, out,
				       nic_vport_context.promisc_uc);
		*promisc_mc = MLX5_GET(query_nic_vport_context_out, out,
				       nic_vport_context.promisc_mc);
		*promisc_all = MLX5_GET(query_nic_vport_context_out, out,
					nic_vport_context.promisc_all);
	}

	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc);
816  
mlx5_modify_nic_vport_promisc(struct mlx5_core_dev * mdev,int promisc_uc,int promisc_mc,int promisc_all)817  int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
818  				  int promisc_uc,
819  				  int promisc_mc,
820  				  int promisc_all)
821  {
822  	void *in;
823  	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
824  	int err;
825  
826  	in = kvzalloc(inlen, GFP_KERNEL);
827  	if (!in)
828  		return -ENOMEM;
829  
830  	MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
831  	MLX5_SET(modify_nic_vport_context_in, in,
832  		 nic_vport_context.promisc_uc, promisc_uc);
833  	MLX5_SET(modify_nic_vport_context_in, in,
834  		 nic_vport_context.promisc_mc, promisc_mc);
835  	MLX5_SET(modify_nic_vport_context_in, in,
836  		 nic_vport_context.promisc_all, promisc_all);
837  	MLX5_SET(modify_nic_vport_context_in, in, opcode,
838  		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
839  
840  	err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
841  
842  	kvfree(in);
843  
844  	return err;
845  }
846  EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_promisc);
847  
/* Bit positions used by mlx5_nic_vport_query_local_lb() to fold the two
 * "disable local loopback" context flags into a single value.
 */
enum {
	UC_LOCAL_LB,
	MC_LOCAL_LB
};
852  
/* Enable or disable unicast/multicast local loopback on our NIC vport.
 *
 * Only the disable knobs the device supports (per capabilities) are
 * selected for modification; when neither is supported this is a no-op
 * that returns 0.
 */
int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *in;
	int err;

	if (!MLX5_CAP_GEN(mdev, disable_local_lb_mc) &&
	    !MLX5_CAP_GEN(mdev, disable_local_lb_uc))
		return 0;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	/* the context bits are inverted: they *disable* loopback */
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.disable_mc_local_lb, !enable);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.disable_uc_local_lb, !enable);

	if (MLX5_CAP_GEN(mdev, disable_local_lb_mc))
		MLX5_SET(modify_nic_vport_context_in, in,
			 field_select.disable_mc_local_lb, 1);

	if (MLX5_CAP_GEN(mdev, disable_local_lb_uc))
		MLX5_SET(modify_nic_vport_context_in, in,
			 field_select.disable_uc_local_lb, 1);
	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);

	err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);

	if (!err)
		mlx5_core_dbg(mdev, "%s local_lb\n",
			      enable ? "enable" : "disable");

	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_update_local_lb);
892  
/* Report whether local loopback is fully enabled on our NIC vport:
 * *status is true only when neither the UC nor the MC disable bit is set.
 */
int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status)
{
	u32 *out;
	int disabled;
	int err;

	out = kvzalloc(MLX5_ST_SZ_BYTES(query_nic_vport_context_out),
		       GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out);
	if (err)
		goto done;

	/* fold both disable flags into one value, one bit per flag */
	disabled = MLX5_GET(query_nic_vport_context_out, out,
			    nic_vport_context.disable_mc_local_lb) << MC_LOCAL_LB;
	disabled |= MLX5_GET(query_nic_vport_context_out, out,
			     nic_vport_context.disable_uc_local_lb) << UC_LOCAL_LB;

	*status = !disabled;

done:
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_query_local_lb);
921  
/* Values programmed into the NIC vport context roce_en field by
 * mlx5_nic_vport_update_roce_state().
 */
enum mlx5_vport_roce_state {
	MLX5_VPORT_ROCE_DISABLED = 0,
	MLX5_VPORT_ROCE_ENABLED  = 1,
};
926  
mlx5_nic_vport_update_roce_state(struct mlx5_core_dev * mdev,enum mlx5_vport_roce_state state)927  static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev *mdev,
928  					    enum mlx5_vport_roce_state state)
929  {
930  	void *in;
931  	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
932  	int err;
933  
934  	in = kvzalloc(inlen, GFP_KERNEL);
935  	if (!in)
936  		return -ENOMEM;
937  
938  	MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
939  	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
940  		 state);
941  	MLX5_SET(modify_nic_vport_context_in, in, opcode,
942  		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
943  
944  	err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
945  
946  	kvfree(in);
947  
948  	return err;
949  }
950  
mlx5_nic_vport_enable_roce(struct mlx5_core_dev * mdev)951  int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
952  {
953  	int err = 0;
954  
955  	mutex_lock(&mlx5_roce_en_lock);
956  	if (!mdev->roce.roce_en)
957  		err = mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_ENABLED);
958  
959  	if (!err)
960  		mdev->roce.roce_en++;
961  	mutex_unlock(&mlx5_roce_en_lock);
962  
963  	return err;
964  }
965  EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);
966  
mlx5_nic_vport_disable_roce(struct mlx5_core_dev * mdev)967  int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
968  {
969  	int err = 0;
970  
971  	mutex_lock(&mlx5_roce_en_lock);
972  	if (mdev->roce.roce_en) {
973  		mdev->roce.roce_en--;
974  		if (mdev->roce.roce_en == 0)
975  			err = mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED);
976  
977  		if (err)
978  			mdev->roce.roce_en++;
979  	}
980  	mutex_unlock(&mlx5_roce_en_lock);
981  	return err;
982  }
983  EXPORT_SYMBOL(mlx5_nic_vport_disable_roce);
984  
/* mlx5_core_query_vport_counter() - read the traffic counter set of a vport.
 * @dev:	 mlx5 core device issuing the command
 * @other_vport: non-zero to query VF @vf's vport instead of our own;
 *		 requires the vport_group_manager capability
 * @vf:		 zero-based VF index; translated to vport number @vf + 1
 * @port_num:	 physical port to query; only meaningful on dual-port devices
 * @out:	 caller-provided buffer sized for query_vport_counter_out
 *
 * Returns 0 on success, -ENOMEM if the command buffer cannot be allocated,
 * -EPERM when @other_vport is requested without group-manager rights, or
 * the firmware command status otherwise.
 */
int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
				  int vf, u8 port_num, void *out)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
	int is_group_manager;
	void *in;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	in = kvzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_vport_counter_in, in, other_vport, 1);
			/* VF index is zero-based; vport 0 is the PF. */
			MLX5_SET(query_vport_counter_in, in, vport_number, vf + 1);
		} else {
			err = -EPERM;
			goto free;
		}
	}
	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_vport_counter_in, in, port_num, port_num);

	err = mlx5_cmd_exec_inout(dev, query_vport_counter, in, out);
free:
	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter);
1020  
/* Read the per-vport "discarded while vport down" counters from the
 * vnic_env. With @other_vport set, @vport selects which vport to query;
 * otherwise our own vport is used.
 * Returns 0 on success (both out-params filled) or a negative errno.
 */
int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport,
				u8 other_vport, u64 *rx_discard_vport_down,
				u64 *tx_discard_vport_down)
{
	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {};
	int ret;

	MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
	MLX5_SET(query_vnic_env_in, in, op_mod, 0);
	MLX5_SET(query_vnic_env_in, in, other_vport, other_vport);
	MLX5_SET(query_vnic_env_in, in, vport_number, vport);

	ret = mlx5_cmd_exec_inout(mdev, query_vnic_env, in, out);
	if (ret)
		return ret;

	*rx_discard_vport_down = MLX5_GET64(query_vnic_env_out, out,
					    vport_env.receive_discard_vport_down);
	*tx_discard_vport_down = MLX5_GET64(query_vnic_env_out, out,
					    vport_env.transmit_discard_vport_down);
	return 0;
}
1045  
/* Modify the HCA vport context of our own vport or, with @other_vport set
 * (group-manager only), of vport @vf on physical port @port_num. Only the
 * fields selected in req->field_select are written; cap_mask1 is gated by
 * req->cap_mask1_perm.
 * Returns 0 on success, -ENOMEM, -EPERM, or the firmware command status.
 */
int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
				       u8 other_vport, u8 port_num,
				       int vf,
				       struct mlx5_hca_vport_context *req)
{
	void *cmd;
	void *vctx;
	int err;

	mlx5_core_dbg(dev, "vf %d\n", vf);
	cmd = kvzalloc(MLX5_ST_SZ_BYTES(modify_hca_vport_context_in),
		       GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	MLX5_SET(modify_hca_vport_context_in, cmd, opcode, MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT);
	if (other_vport) {
		/* Touching another vport requires group-manager rights. */
		if (!MLX5_CAP_GEN(dev, vport_group_manager)) {
			err = -EPERM;
			goto out;
		}
		MLX5_SET(modify_hca_vport_context_in, cmd, other_vport, 1);
		MLX5_SET(modify_hca_vport_context_in, cmd, vport_number, vf);
	}

	if (MLX5_CAP_GEN(dev, num_ports) > 1)
		MLX5_SET(modify_hca_vport_context_in, cmd, port_num, port_num);

	vctx = MLX5_ADDR_OF(modify_hca_vport_context_in, cmd, hca_vport_context);
	MLX5_SET(hca_vport_context, vctx, field_select, req->field_select);
	if (req->field_select & MLX5_HCA_VPORT_SEL_STATE_POLICY)
		MLX5_SET(hca_vport_context, vctx, vport_state_policy,
			 req->policy);
	if (req->field_select & MLX5_HCA_VPORT_SEL_PORT_GUID)
		MLX5_SET64(hca_vport_context, vctx, port_guid, req->port_guid);
	if (req->field_select & MLX5_HCA_VPORT_SEL_NODE_GUID)
		MLX5_SET64(hca_vport_context, vctx, node_guid, req->node_guid);
	MLX5_SET(hca_vport_context, vctx, cap_mask1, req->cap_mask1);
	MLX5_SET(hca_vport_context, vctx, cap_mask1_field_select,
		 req->cap_mask1_perm);
	err = mlx5_cmd_exec_in(dev, modify_hca_vport_context, cmd);
out:
	kvfree(cmd);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_modify_hca_vport_context);
1095  
mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev * master_mdev,struct mlx5_core_dev * port_mdev)1096  int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev,
1097  				       struct mlx5_core_dev *port_mdev)
1098  {
1099  	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1100  	void *in;
1101  	int err;
1102  
1103  	in = kvzalloc(inlen, GFP_KERNEL);
1104  	if (!in)
1105  		return -ENOMEM;
1106  
1107  	err = mlx5_nic_vport_enable_roce(port_mdev);
1108  	if (err)
1109  		goto free;
1110  
1111  	MLX5_SET(modify_nic_vport_context_in, in, field_select.affiliation, 1);
1112  	if (MLX5_CAP_GEN_2(master_mdev, sw_vhca_id_valid)) {
1113  		MLX5_SET(modify_nic_vport_context_in, in,
1114  			 nic_vport_context.vhca_id_type, VHCA_ID_TYPE_SW);
1115  		MLX5_SET(modify_nic_vport_context_in, in,
1116  			 nic_vport_context.affiliated_vhca_id,
1117  			 MLX5_CAP_GEN_2(master_mdev, sw_vhca_id));
1118  	} else {
1119  		MLX5_SET(modify_nic_vport_context_in, in,
1120  			 nic_vport_context.affiliated_vhca_id,
1121  			 MLX5_CAP_GEN(master_mdev, vhca_id));
1122  	}
1123  	MLX5_SET(modify_nic_vport_context_in, in,
1124  		 nic_vport_context.affiliation_criteria,
1125  		 MLX5_CAP_GEN(port_mdev, affiliate_nic_vport_criteria));
1126  	MLX5_SET(modify_nic_vport_context_in, in, opcode,
1127  		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
1128  
1129  	err = mlx5_cmd_exec_in(port_mdev, modify_nic_vport_context, in);
1130  	if (err)
1131  		mlx5_nic_vport_disable_roce(port_mdev);
1132  
1133  free:
1134  	kvfree(in);
1135  	return err;
1136  }
1137  EXPORT_SYMBOL_GPL(mlx5_nic_vport_affiliate_multiport);
1138  
mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev * port_mdev)1139  int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev)
1140  {
1141  	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1142  	void *in;
1143  	int err;
1144  
1145  	in = kvzalloc(inlen, GFP_KERNEL);
1146  	if (!in)
1147  		return -ENOMEM;
1148  
1149  	MLX5_SET(modify_nic_vport_context_in, in, field_select.affiliation, 1);
1150  	MLX5_SET(modify_nic_vport_context_in, in,
1151  		 nic_vport_context.affiliated_vhca_id, 0);
1152  	MLX5_SET(modify_nic_vport_context_in, in,
1153  		 nic_vport_context.affiliation_criteria, 0);
1154  	MLX5_SET(modify_nic_vport_context_in, in, opcode,
1155  		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
1156  
1157  	err = mlx5_cmd_exec_in(port_mdev, modify_nic_vport_context, in);
1158  	if (!err)
1159  		mlx5_nic_vport_disable_roce(port_mdev);
1160  
1161  	kvfree(in);
1162  	return err;
1163  }
1164  EXPORT_SYMBOL_GPL(mlx5_nic_vport_unaffiliate_multiport);
1165  
mlx5_query_nic_system_image_guid(struct mlx5_core_dev * mdev)1166  u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev)
1167  {
1168  	int port_type_cap = MLX5_CAP_GEN(mdev, port_type);
1169  	u64 tmp;
1170  	int err;
1171  
1172  	if (mdev->sys_image_guid)
1173  		return mdev->sys_image_guid;
1174  
1175  	if (port_type_cap == MLX5_CAP_PORT_TYPE_ETH)
1176  		err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
1177  	else
1178  		err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
1179  
1180  	mdev->sys_image_guid = err ? 0 : tmp;
1181  
1182  	return mdev->sys_image_guid;
1183  }
1184  EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid);
1185  
/* Query the HCA capabilities of another function identified by @vport.
 * @opmod selects the capability group; the low op_mod bit is forced to
 * HCA_CAP_OPMOD_GET_MAX so the maximum (not current) values are returned
 * into @out.
 * Returns 0 on success or a negative errno.
 */
int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 vport, void *out,
				  u16 opmod)
{
	bool ec_vf_func = mlx5_core_is_ec_vf_vport(dev, vport);
	u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)] = {};

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod,
		 (opmod << 1) | (HCA_CAP_OPMOD_GET_MAX & 0x01));
	MLX5_SET(query_hca_cap_in, in, other_function, true);
	MLX5_SET(query_hca_cap_in, in, function_id,
		 mlx5_vport_to_func_id(dev, vport, ec_vf_func));
	MLX5_SET(query_hca_cap_in, in, ec_vf_function, ec_vf_func);
	return mlx5_cmd_exec_inout(dev, query_hca_cap, in, out);
}
EXPORT_SYMBOL_GPL(mlx5_vport_get_other_func_cap);
1201  
/* Set the HCA capabilities of another function identified by @vport.
 * @hca_cap must point to a cmd_hca_cap-sized capability layout; @opmod
 * selects the capability group (shifted into op_mod as for SET_HCA_CAP).
 * Returns 0 on success or a negative errno.
 */
int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap,
				  u16 vport, u16 opmod)
{
	bool ec_vf_func = mlx5_core_is_ec_vf_vport(dev, vport);
	void *cmd;
	void *cap;
	int err;

	cmd = kzalloc(MLX5_ST_SZ_BYTES(set_hca_cap_in), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	MLX5_SET(set_hca_cap_in, cmd, opcode, MLX5_CMD_OP_SET_HCA_CAP);
	MLX5_SET(set_hca_cap_in, cmd, op_mod, opmod << 1);
	MLX5_SET(set_hca_cap_in, cmd, other_function, true);
	MLX5_SET(set_hca_cap_in, cmd, function_id,
		 mlx5_vport_to_func_id(dev, vport, ec_vf_func));
	MLX5_SET(set_hca_cap_in, cmd, ec_vf_function, ec_vf_func);

	cap = MLX5_ADDR_OF(set_hca_cap_in, cmd, capability);
	memcpy(cap, hca_cap, MLX5_ST_SZ_BYTES(cmd_hca_cap));

	err = mlx5_cmd_exec_in(dev, set_hca_cap, cmd);

	kfree(cmd);
	return err;
}
1228