// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021 Mellanox Technologies Ltd */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "esw/acl/lgcy.h"
#include "esw/legacy.h"
#include "mlx5_core.h"
#include "eswitch.h"
#include "fs_core.h"
#include "fs_ft_pool.h"
#include "esw/qos.h"

enum {
	LEGACY_VEPA_PRIO = 0,
	LEGACY_FDB_PRIO,
};

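/* The VEPA table sits at the highest FDB priority and holds at most two
 * rules (the uplink rule and the catch-all "star" rule), installed later
 * by _mlx5_eswitch_set_vepa_locked().
 */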
static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb;
	int err;

	root_ns = mlx5_get_fdb_sub_ns(dev, 0);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		return -EOPNOTSUPP;
	}

	/* num FTE 2, num FG 2 */
	ft_attr.prio = LEGACY_VEPA_PRIO;
	ft_attr.max_fte = 2;
	ft_attr.autogroup.max_num_groups = 2;
	fdb = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create VEPA FDB err %d\n", err);
		return err;
	}
	esw->fdb_table.legacy.vepa_fdb = fdb;

	return 0;
}

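/* Tear down the legacy FDB table and its flow groups. Safe to call on a
 * partially constructed table: every group pointer is checked first.
 */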
static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
{
	esw_debug(esw->dev, "Destroy FDB Table\n");
	if (!esw->fdb_table.legacy.fdb)
		return;

	if (esw->fdb_table.legacy.promisc_grp)
		mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
	if (esw->fdb_table.legacy.allmulti_grp)
		mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
	if (esw->fdb_table.legacy.addr_grp)
		mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
	mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb);

	esw->fdb_table.legacy.fdb = NULL;
	esw->fdb_table.legacy.addr_grp = NULL;
	esw->fdb_table.legacy.allmulti_grp = NULL;
	esw->fdb_table.legacy.promisc_grp = NULL;
	atomic64_set(&esw->user_count, 0);
}

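/* Create the legacy FDB table with three flow groups:
 *   [0 .. size-3]  exact-DMAC matches for unicast/multicast addresses
 *   [size-2]       allmulti: any DMAC with the multicast bit set
 *   [size-1]       promisc: match on source vport only
 */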
static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb;
	struct mlx5_flow_group *g;
	void *match_criteria;
	int table_size;
	u32 *flow_group_in;
	u8 *dmac;
	int err = 0;

	esw_debug(dev, "Create FDB log_max_size(%d)\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

	root_ns = mlx5_get_fdb_sub_ns(dev, 0);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		return -EOPNOTSUPP;
	}

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	ft_attr.max_fte = POOL_NEXT_SIZE;
	ft_attr.prio = LEGACY_FDB_PRIO;
	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create FDB Table err %d\n", err);
		goto out;
	}
	esw->fdb_table.legacy.fdb = fdb;
	table_size = fdb->max_fte;

	/* Addresses group: full match on unicast/multicast addresses */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	/* Preserve 2 entries for the allmulti and promisc rules */
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
	eth_broadcast_addr(dmac);
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.legacy.addr_grp = g;

	/* Allmulti group: one rule that forwards any mcast traffic */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 2);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2);
	eth_zero_addr(dmac);
	dmac[0] = 0x01;
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.legacy.allmulti_grp = g;

	/* Promiscuous group:
	 * one rule that forwards all unmatched traffic from the previous groups
	 */
	eth_zero_addr(dmac);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.legacy.promisc_grp = g;

out:
	if (err)
		esw_destroy_legacy_fdb_table(esw);

	kvfree(flow_group_in);
	return err;
}

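/* Destroy the VEPA flow table, if one was created. */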
static void esw_destroy_legacy_vepa_table(struct mlx5_eswitch *esw)
{
	esw_debug(esw->dev, "Destroy VEPA Table\n");
	if (!esw->fdb_table.legacy.vepa_fdb)
		return;

	mlx5_destroy_flow_table(esw->fdb_table.legacy.vepa_fdb);
	esw->fdb_table.legacy.vepa_fdb = NULL;
}

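/* Create both legacy-mode tables; a failure creating the FDB table rolls
 * back the VEPA table.
 */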
static int esw_create_legacy_table(struct mlx5_eswitch *esw)
{
	int err;

	memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb));
	atomic64_set(&esw->user_count, 0);

	err = esw_create_legacy_vepa_table(esw);
	if (err)
		return err;

	err = esw_create_legacy_fdb_table(esw);
	if (err)
		esw_destroy_legacy_vepa_table(esw);

	return err;
}

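/* Remove the VEPA uplink and star rules, if installed. */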
static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw)
{
	if (esw->fdb_table.legacy.vepa_uplink_rule)
		mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_uplink_rule);

	if (esw->fdb_table.legacy.vepa_star_rule)
		mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_star_rule);

	esw->fdb_table.legacy.vepa_uplink_rule = NULL;
	esw->fdb_table.legacy.vepa_star_rule = NULL;
}

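/* Full legacy teardown: VEPA rules first, then both flow tables. */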
static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
{
	esw_cleanup_vepa_rules(esw);
	esw_destroy_legacy_fdb_table(esw);
	esw_destroy_legacy_vepa_table(esw);
}

#define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \
					MLX5_VPORT_MC_ADDR_CHANGE | \
					MLX5_VPORT_PROMISC_CHANGE)

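/* Enter legacy (SR-IOV) mode: create the flow tables, default the VF
 * link state to AUTO and enable the PF/VF vports with UC/MC address and
 * promisc change handling.
 */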
int esw_legacy_enable(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	unsigned long i;
	int ret;

	ret = esw_create_legacy_table(esw);
	if (ret)
		return ret;

	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;

	ret = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS);
	if (ret)
		esw_destroy_legacy_table(esw);
	return ret;
}

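/* Leave legacy mode: disable the vports, drop the multicast promisc
 * uplink rule and destroy the flow tables.
 */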
void esw_legacy_disable(struct mlx5_eswitch *esw)
{
	struct esw_mc_addr *mc_promisc;

	mlx5_eswitch_disable_pf_vf_vports(esw);

	mc_promisc = &esw->mc_promisc;
	if (mc_promisc->uplink_rule)
		mlx5_del_flow_rules(mc_promisc->uplink_rule);

	esw_destroy_legacy_table(esw);
}

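/* Install or remove the VEPA rules; must be called with esw->state_lock
 * held. With VEPA enabled, traffic arriving from the uplink is steered
 * into the legacy FDB, while everything else is reflected back out of
 * the uplink vport so the adjacent bridge does the switching (802.1Qbg).
 */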
static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
					 u8 setting)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	int err = 0;
	void *misc;

	if (!setting) {
		esw_cleanup_vepa_rules(esw);
		return 0;
	}

	if (esw->fdb_table.legacy.vepa_uplink_rule)
		return 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	/* Uplink rule forwards uplink traffic to the FDB */
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = esw->fdb_table.legacy.fdb;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		goto out;
	}
	esw->fdb_table.legacy.vepa_uplink_rule = flow_rule;

	/* Star rule to forward all traffic to the uplink vport */
	memset(&dest, 0, sizeof(dest));
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = MLX5_VPORT_UPLINK;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, NULL,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		goto out;
	}
	esw->fdb_table.legacy.vepa_star_rule = flow_rule;

out:
	kvfree(spec);
	if (err)
		esw_cleanup_vepa_rules(esw);
	return err;
}

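/* Toggle VEPA; only permitted while the eswitch is in legacy mode with
 * its FDB created.
 */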
int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting)
{
	int err = 0;

	if (!esw)
		return -EOPNOTSUPP;

	if (!mlx5_esw_allowed(esw))
		return -EPERM;

	mutex_lock(&esw->state_lock);
	if (esw->mode != MLX5_ESWITCH_LEGACY || !mlx5_esw_is_fdb_created(esw)) {
		err = -EOPNOTSUPP;
		goto out;
	}

	err = _mlx5_eswitch_set_vepa_locked(esw, setting);

out:
	mutex_unlock(&esw->state_lock);
	return err;
}

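/* Report whether VEPA is enabled, derived from the presence of the
 * VEPA uplink rule.
 */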
int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting)
{
	if (!esw)
		return -EOPNOTSUPP;

	if (!mlx5_esw_allowed(esw))
		return -EPERM;

	if (esw->mode != MLX5_ESWITCH_LEGACY || !mlx5_esw_is_fdb_created(esw))
		return -EOPNOTSUPP;

	*setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0;
	return 0;
}

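/* Set up ingress and egress ACLs for a vport; manager vports need no
 * ACLs in legacy mode.
 */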
int esw_legacy_vport_acl_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	int ret;

	/* Only non-manager vports need ACLs in legacy mode */
	if (mlx5_esw_is_manager_vport(esw, vport->vport))
		return 0;

	ret = esw_acl_ingress_lgcy_setup(esw, vport);
	if (ret)
		goto ingress_err;

	ret = esw_acl_egress_lgcy_setup(esw, vport);
	if (ret)
		goto egress_err;

	return 0;

egress_err:
	esw_acl_ingress_lgcy_cleanup(esw, vport);
ingress_err:
	return ret;
}

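/* Tear down the vport ACLs in reverse order of setup. */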
void esw_legacy_vport_acl_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	if (mlx5_esw_is_manager_vport(esw, vport->vport))
		return;

	esw_acl_egress_lgcy_cleanup(esw, vport);
	esw_acl_ingress_lgcy_cleanup(esw, vport);
}

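/* Aggregate drop statistics for a vport: egress ACL drops are reported
 * as rx_dropped and ingress ACL drops as tx_dropped (the host-side view
 * of VF traffic), plus the firmware's vport-down discard counters when
 * the device supports them.
 */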
int mlx5_esw_query_vport_drop_stats(struct mlx5_core_dev *dev,
				    struct mlx5_vport *vport,
				    struct mlx5_vport_drop_stats *stats)
{
	u64 rx_discard_vport_down, tx_discard_vport_down;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	u64 bytes = 0;
	int err = 0;

	if (esw->mode != MLX5_ESWITCH_LEGACY)
		return 0;

	mutex_lock(&esw->state_lock);
	if (!vport->enabled)
		goto unlock;

	if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_counter))
		mlx5_fc_query(dev, vport->egress.legacy.drop_counter,
			      &stats->rx_dropped, &bytes);

	if (vport->ingress.legacy.drop_counter)
		mlx5_fc_query(dev, vport->ingress.legacy.drop_counter,
			      &stats->tx_dropped, &bytes);

	if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) &&
	    !MLX5_CAP_GEN(dev, transmit_discard_vport_down))
		goto unlock;

	err = mlx5_query_vport_down_stats(dev, vport->vport, 1,
					  &rx_discard_vport_down,
					  &tx_discard_vport_down);
	if (err)
		goto unlock;

	if (MLX5_CAP_GEN(dev, receive_discard_vport_down))
		stats->rx_dropped += rx_discard_vport_down;
	if (MLX5_CAP_GEN(dev, transmit_discard_vport_down))
		stats->tx_dropped += tx_discard_vport_down;

unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

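/* Configure VST VLAN and QoS priority for a vport. A VLAN of 0 in
 * non-legacy mode succeeds as a no-op, for libvirt compatibility.
 */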
int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				u16 vport, u16 vlan, u8 qos)
{
	u8 set_flags = 0;
	int err = 0;

	if (!mlx5_esw_allowed(esw))
		return vlan ? -EPERM : 0;

	if (vlan || qos)
		set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;

	mutex_lock(&esw->state_lock);
	if (esw->mode != MLX5_ESWITCH_LEGACY) {
		if (!vlan)
			goto unlock; /* compatibility with libvirt */

		err = -EOPNOTSUPP;
		goto unlock;
	}

	err = __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags);

unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

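/* Enable or disable MAC spoof checking on a vport by rebuilding its
 * ingress ACL; the previous setting is restored if that fails.
 */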
int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
				    u16 vport, bool spoofchk)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
	bool pschk;
	int err = 0;

	if (!mlx5_esw_allowed(esw))
		return -EPERM;
	if (IS_ERR(evport))
		return PTR_ERR(evport);

	mutex_lock(&esw->state_lock);
	if (esw->mode != MLX5_ESWITCH_LEGACY) {
		err = -EOPNOTSUPP;
		goto unlock;
	}
	pschk = evport->info.spoofchk;
	evport->info.spoofchk = spoofchk;
	if (pschk && !is_valid_ether_addr(evport->info.mac))
		mlx5_core_warn(esw->dev,
			       "Spoofchk is set while MAC is invalid, vport(%d)\n",
			       evport->vport);
	if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
		err = esw_acl_ingress_lgcy_setup(esw, evport);
	if (err)
		evport->info.spoofchk = pschk;

unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

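/* Mark a vport as trusted and replay the vport change handler so the
 * new setting takes effect immediately.
 */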
int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
				 u16 vport, bool setting)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
	int err = 0;

	if (!mlx5_esw_allowed(esw))
		return -EPERM;
	if (IS_ERR(evport))
		return PTR_ERR(evport);

	mutex_lock(&esw->state_lock);
	if (esw->mode != MLX5_ESWITCH_LEGACY) {
		err = -EOPNOTSUPP;
		goto unlock;
	}
	evport->info.trusted = setting;
	if (evport->enabled)
		esw_vport_change_handle_locked(evport);

unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

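/* Set the min/max TX rate limits of a vport through the eswitch QoS
 * scheduling hierarchy.
 */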
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
				u32 max_rate, u32 min_rate)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
	int err;

	if (!mlx5_esw_allowed(esw))
		return -EPERM;
	if (IS_ERR(evport))
		return PTR_ERR(evport);

	mutex_lock(&esw->state_lock);
	err = mlx5_esw_qos_set_vport_rate(esw, evport, max_rate, min_rate);
	mutex_unlock(&esw->state_lock);
	return err;
}