// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2020 Mellanox Technologies.

#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/fs.h>

#include "lib/fs_chains.h"
#include "fs_ft_pool.h"
#include "en/mapping.h"
#include "fs_core.h"
#include "en_tc.h"

#define chains_lock(chains) ((chains)->lock)
#define chains_ht(chains) ((chains)->chains_ht)
#define prios_ht(chains) ((chains)->prios_ht)
#define chains_default_ft(chains) ((chains)->chains_default_ft)
#define chains_end_ft(chains) ((chains)->chains_end_ft)
#define FT_TBL_SZ (64 * 1024)

struct mlx5_fs_chains {
	struct mlx5_core_dev *dev;

	struct rhashtable chains_ht;
	struct rhashtable prios_ht;
	/* Protects above chains_ht and prios_ht */
	struct mutex lock;

	struct mlx5_flow_table *chains_default_ft;
	struct mlx5_flow_table *chains_end_ft;
	struct mapping_ctx *chains_mapping;

	enum mlx5_flow_namespace_type ns;
	u32 group_num;
	u32 flags;
	int fs_base_prio;
	int fs_base_level;
};

struct fs_chain {
	struct rhash_head node;

	u32 chain;

	int ref;
	int id;

	struct mlx5_fs_chains *chains;
	struct list_head prios_list;
	struct mlx5_flow_handle *restore_rule;
	struct mlx5_modify_hdr *miss_modify_hdr;
};

struct prio_key {
	u32 chain;
	u32 prio;
	u32 level;
};

struct prio {
	struct rhash_head node;
	struct list_head list;

	struct prio_key key;

	int ref;

	struct fs_chain *chain;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_table *next_ft;
	struct mlx5_flow_group *miss_group;
	struct mlx5_flow_handle *miss_rule;
};

static const struct rhashtable_params chain_params = {
	.head_offset = offsetof(struct fs_chain, node),
	.key_offset = offsetof(struct fs_chain, chain),
	.key_len = sizeof_field(struct fs_chain, chain),
	.automatic_shrinking = true,
};

static const struct rhashtable_params prio_params = {
	.head_offset = offsetof(struct prio, node),
	.key_offset = offsetof(struct prio, key),
	.key_len = sizeof_field(struct prio, key),
	.automatic_shrinking = true,
};

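/* Capability helpers: chains/prios and ignore_flow_level support are
 * reported through the flags the caller passed in struct mlx5_chains_attr
 * at creation time.
 */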
bool mlx5_chains_prios_supported(struct mlx5_fs_chains *chains)
{
	return chains->flags & MLX5_CHAINS_AND_PRIOS_SUPPORTED;
}

bool mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains)
{
	return chains->flags & MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
}

bool mlx5_chains_backwards_supported(struct mlx5_fs_chains *chains)
{
	return mlx5_chains_prios_supported(chains) &&
	       mlx5_chains_ignore_flow_level_supported(chains);
}

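/* Highest chain number callers may request. The special NF FT chain,
 * mlx5_chains_get_nf_ft_chain(), sits one above this range.
 */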
u32 mlx5_chains_get_chain_range(struct mlx5_fs_chains *chains)
{
	if (!mlx5_chains_prios_supported(chains))
		return 1;

	if (mlx5_chains_ignore_flow_level_supported(chains))
		return UINT_MAX - 1;

	/* We should get here only for eswitch case */
	return FDB_TC_MAX_CHAIN;
}

u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains)
{
	return mlx5_chains_get_chain_range(chains) + 1;
}

u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains)
{
	if (mlx5_chains_ignore_flow_level_supported(chains))
		return UINT_MAX;

	if (!chains->dev->priv.eswitch ||
	    chains->dev->priv.eswitch->mode != MLX5_ESWITCH_OFFLOADS)
		return 1;

	/* We should get here only for eswitch case */
	return FDB_TC_MAX_PRIO;
}

static unsigned int mlx5_chains_get_level_range(struct mlx5_fs_chains *chains)
{
	if (mlx5_chains_ignore_flow_level_supported(chains))
		return UINT_MAX;

	/* Same value for FDB and NIC RX tables */
	return FDB_TC_LEVELS_PER_PRIO;
}

void
mlx5_chains_set_end_ft(struct mlx5_fs_chains *chains,
		       struct mlx5_flow_table *ft)
{
	chains_end_ft(chains) = ft;
}

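/* Create the flow table backing one (chain, prio, level) slot. The root
 * table (chain 0, prio 1, level 0) is always a regular fs_core managed
 * table; when ignore_flow_level is supported all other tables are created
 * unmanaged and are connected through explicit miss rules instead.
 */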
static struct mlx5_flow_table *
mlx5_chains_create_table(struct mlx5_fs_chains *chains,
			 u32 chain, u32 prio, u32 level)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *ft;
	int sz;

	if (chains->flags & MLX5_CHAINS_FT_TUNNEL_SUPPORTED)
		ft_attr.flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
				  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	sz = (chain == mlx5_chains_get_nf_ft_chain(chains)) ? FT_TBL_SZ : POOL_NEXT_SIZE;
	ft_attr.max_fte = sz;

	/* We use chains_default_ft(chains) as the table's next_ft until
	 * ignore_flow_level is allowed on FT creation and not just for FTEs.
	 * Until then, the caller should add an explicit miss rule if needed.
	 */
	ft_attr.next_ft = chains_default_ft(chains);

	/* The root table (chain 0, prio 1, level 0) is required to be
	 * connected to the previous fs_core managed prio.
	 * We always create it, as a managed table, in order to align with
	 * fs_core logic.
	 */
	if (!mlx5_chains_ignore_flow_level_supported(chains) ||
	    (chain == 0 && prio == 1 && level == 0)) {
		ft_attr.level = chains->fs_base_level;
		ft_attr.prio = chains->fs_base_prio + prio - 1;
		ns = (chains->ns == MLX5_FLOW_NAMESPACE_FDB) ?
			mlx5_get_fdb_sub_ns(chains->dev, chain) :
			mlx5_get_flow_namespace(chains->dev, chains->ns);
	} else {
		ft_attr.flags |= MLX5_FLOW_TABLE_UNMANAGED;
		ft_attr.prio = chains->fs_base_prio;
		/* Firmware doesn't allow us to create another level 0 table,
		 * so we create all unmanaged tables as level 1 (base + 1).
		 *
		 * To connect them, we use explicit miss rules with
		 * ignore_flow_level. The caller is responsible for creating
		 * these rules (if needed).
		 */
		ft_attr.level = chains->fs_base_level + 1;
		ns = mlx5_get_flow_namespace(chains->dev, chains->ns);
	}

	ft_attr.autogroup.num_reserved_entries = 2;
	ft_attr.autogroup.max_num_groups = chains->group_num;
	ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		mlx5_core_warn(chains->dev, "Failed to create chains table err %d (chain: %d, prio: %d, level: %d, size: %d)\n",
			       (int)PTR_ERR(ft), chain, prio, level, sz);
		return ft;
	}

	return ft;
}

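/* Allocate a mapping id for this chain and build the modify header that
 * writes it to the chain register, so packets missing to the end table can
 * be restored to their chain in software. The FDB namespace also gets an
 * esw restore rule. Skipped for the NF FT chain and when prios or a
 * mapping context aren't available.
 */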
static int
create_chain_restore(struct fs_chain *chain)
{
	struct mlx5_eswitch *esw = chain->chains->dev->priv.eswitch;
	u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	struct mlx5_fs_chains *chains = chain->chains;
	enum mlx5e_tc_attr_to_reg mapped_obj_to_reg;
	struct mlx5_modify_hdr *mod_hdr;
	u32 index;
	int err;

	if (chain->chain == mlx5_chains_get_nf_ft_chain(chains) ||
	    !mlx5_chains_prios_supported(chains) ||
	    !chains->chains_mapping)
		return 0;

	err = mlx5_chains_get_chain_mapping(chains, chain->chain, &index);
	if (err)
		return err;
	if (index == MLX5_FS_DEFAULT_FLOW_TAG) {
		/* we got the special default flow tag id, so we won't know
		 * if we actually marked the packet with the restore rule
		 * we create.
		 *
		 * This case isn't possible with MLX5_FS_DEFAULT_FLOW_TAG = 0.
		 */
		err = mlx5_chains_get_chain_mapping(chains, chain->chain, &index);
		mapping_remove(chains->chains_mapping, MLX5_FS_DEFAULT_FLOW_TAG);
		if (err)
			return err;
	}

	chain->id = index;

	if (chains->ns == MLX5_FLOW_NAMESPACE_FDB) {
		mapped_obj_to_reg = MAPPED_OBJ_TO_REG;
		chain->restore_rule = esw_add_restore_rule(esw, chain->id);
		if (IS_ERR(chain->restore_rule)) {
			err = PTR_ERR(chain->restore_rule);
			goto err_rule;
		}
	} else if (chains->ns == MLX5_FLOW_NAMESPACE_KERNEL) {
		/* For NIC RX we don't need a restore rule
		 * since we write the metadata to reg_b
		 * that is passed to SW directly.
		 */
		mapped_obj_to_reg = NIC_MAPPED_OBJ_TO_REG;
	} else {
		err = -EINVAL;
		goto err_rule;
	}

	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, modact, field,
		 mlx5e_tc_attr_to_reg_mappings[mapped_obj_to_reg].mfield);
	MLX5_SET(set_action_in, modact, offset,
		 mlx5e_tc_attr_to_reg_mappings[mapped_obj_to_reg].moffset);
	MLX5_SET(set_action_in, modact, length,
		 mlx5e_tc_attr_to_reg_mappings[mapped_obj_to_reg].mlen == 32 ?
		 0 : mlx5e_tc_attr_to_reg_mappings[mapped_obj_to_reg].mlen);
	MLX5_SET(set_action_in, modact, data, chain->id);
	mod_hdr = mlx5_modify_header_alloc(chains->dev, chains->ns,
					   1, modact);
	if (IS_ERR(mod_hdr)) {
		err = PTR_ERR(mod_hdr);
		goto err_mod_hdr;
	}
	chain->miss_modify_hdr = mod_hdr;

	return 0;

err_mod_hdr:
	if (!IS_ERR_OR_NULL(chain->restore_rule))
		mlx5_del_flow_rules(chain->restore_rule);
err_rule:
	/* Datapath can't find this mapping, so we can safely remove it */
	mapping_remove(chains->chains_mapping, chain->id);
	return err;
}

static void destroy_chain_restore(struct fs_chain *chain)
{
	struct mlx5_fs_chains *chains = chain->chains;

	if (!chain->miss_modify_hdr)
		return;

	if (chain->restore_rule)
		mlx5_del_flow_rules(chain->restore_rule);

	mlx5_modify_header_dealloc(chains->dev, chain->miss_modify_hdr);
	mapping_remove(chains->chains_mapping, chain->id);
}

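/* Allocate a chain object, set up its restore handling and insert it into
 * the chains hashtable. Runs under chains_lock via mlx5_chains_get_table().
 */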
static struct fs_chain *
mlx5_chains_create_chain(struct mlx5_fs_chains *chains, u32 chain)
{
	struct fs_chain *chain_s = NULL;
	int err;

	chain_s = kvzalloc(sizeof(*chain_s), GFP_KERNEL);
	if (!chain_s)
		return ERR_PTR(-ENOMEM);

	chain_s->chains = chains;
	chain_s->chain = chain;
	INIT_LIST_HEAD(&chain_s->prios_list);

	err = create_chain_restore(chain_s);
	if (err)
		goto err_restore;

	err = rhashtable_insert_fast(&chains_ht(chains), &chain_s->node,
				     chain_params);
	if (err)
		goto err_insert;

	return chain_s;

err_insert:
	destroy_chain_restore(chain_s);
err_restore:
	kvfree(chain_s);
	return ERR_PTR(err);
}

static void
mlx5_chains_destroy_chain(struct fs_chain *chain)
{
	struct mlx5_fs_chains *chains = chain->chains;

	rhashtable_remove_fast(&chains_ht(chains), &chain->node,
			       chain_params);

	destroy_chain_restore(chain);
	kvfree(chain);
}

static struct fs_chain *
mlx5_chains_get_chain(struct mlx5_fs_chains *chains, u32 chain)
{
	struct fs_chain *chain_s;

	chain_s = rhashtable_lookup_fast(&chains_ht(chains), &chain,
					 chain_params);
	if (!chain_s) {
		chain_s = mlx5_chains_create_chain(chains, chain);
		if (IS_ERR(chain_s))
			return chain_s;
	}

	chain_s->ref++;

	return chain_s;
}

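/* Add the catch-all miss rule of ft, forwarding to next_ft. When missing
 * from a mapped chain to the end table, the rule also rewrites the chain id
 * into the register so software can restore the chain.
 */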
static struct mlx5_flow_handle *
mlx5_chains_add_miss_rule(struct fs_chain *chain,
			  struct mlx5_flow_table *ft,
			  struct mlx5_flow_table *next_ft)
{
	struct mlx5_fs_chains *chains = chain->chains;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act act = {};

	act.flags  = FLOW_ACT_NO_APPEND;
	if (mlx5_chains_ignore_flow_level_supported(chain->chains))
		act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;

	act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	dest.type  = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = next_ft;

	if (chains->chains_mapping && next_ft == chains_end_ft(chains) &&
	    chain->chain != mlx5_chains_get_nf_ft_chain(chains) &&
	    mlx5_chains_prios_supported(chains)) {
		act.modify_hdr = chain->miss_modify_hdr;
		act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	}

	return mlx5_add_flow_rules(ft, NULL, &act, &dest, 1);
}

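/* Re-point the miss rules of all preceding levels/prios of this chain to
 * next_ft. The new miss rules are installed first and the old ones removed
 * only once all additions succeeded, so a failure leaves the chain intact.
 */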
static int
mlx5_chains_update_prio_prevs(struct prio *prio,
			      struct mlx5_flow_table *next_ft)
{
	struct mlx5_flow_handle *miss_rules[FDB_TC_LEVELS_PER_PRIO + 1] = {};
	struct fs_chain *chain = prio->chain;
	struct prio *pos;
	int n = 0, err;

	if (prio->key.level)
		return 0;

	/* Iterate in reverse order until reaching the level 0 rule of
	 * the previous priority, adding all the miss rules first, so we can
	 * revert them if any of them fails.
	 */
	pos = prio;
	list_for_each_entry_continue_reverse(pos,
					     &chain->prios_list,
					     list) {
		miss_rules[n] = mlx5_chains_add_miss_rule(chain,
							  pos->ft,
							  next_ft);
		if (IS_ERR(miss_rules[n])) {
			err = PTR_ERR(miss_rules[n]);
			goto err_prev_rule;
		}

		n++;
		if (!pos->key.level)
			break;
	}

	/* Success, delete old miss rules, and update the pointers. */
	n = 0;
	pos = prio;
	list_for_each_entry_continue_reverse(pos,
					     &chain->prios_list,
					     list) {
		mlx5_del_flow_rules(pos->miss_rule);

		pos->miss_rule = miss_rules[n];
		pos->next_ft = next_ft;

		n++;
		if (!pos->key.level)
			break;
	}

	return 0;

err_prev_rule:
	while (--n >= 0)
		mlx5_del_flow_rules(miss_rules[n]);

	return err;
}

static void
mlx5_chains_put_chain(struct fs_chain *chain)
{
	if (--chain->ref == 0)
		mlx5_chains_destroy_chain(chain);
}

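/* Create the table for a (chain, prio, level) slot: reserve the table's two
 * last entries for the miss group, point its miss rule at the next prio's
 * level 0 table (or the chain's default miss), splice it into the sorted
 * prio list and reconnect the previous prios to it.
 */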
static struct prio *
mlx5_chains_create_prio(struct mlx5_fs_chains *chains,
			u32 chain, u32 prio, u32 level)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_handle *miss_rule;
	struct mlx5_flow_group *miss_group;
	struct mlx5_flow_table *next_ft;
	struct mlx5_flow_table *ft;
	struct fs_chain *chain_s;
	struct list_head *pos;
	struct prio *prio_s;
	u32 *flow_group_in;
	int err;

	chain_s = mlx5_chains_get_chain(chains, chain);
	if (IS_ERR(chain_s))
		return ERR_CAST(chain_s);

	prio_s = kvzalloc(sizeof(*prio_s), GFP_KERNEL);
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!prio_s || !flow_group_in) {
		err = -ENOMEM;
		goto err_alloc;
	}

	/* Chain's prio list is sorted by prio and level.
	 * All levels of a given prio point to the next prio's level 0.
	 * Example list (prio, level):
	 * (3,0)->(3,1)->(5,0)->(5,1)->(6,1)->(7,0)
	 * In hardware, we will have the following pointers:
	 * (3,0) -> (5,0) -> (7,0) -> Slow path
	 * (3,1) -> (5,0)
	 * (5,1) -> (7,0)
	 * (6,1) -> (7,0)
	 */

	/* Default miss for each chain: */
	next_ft = (chain == mlx5_chains_get_nf_ft_chain(chains)) ?
		  chains_default_ft(chains) :
		  chains_end_ft(chains);
	list_for_each(pos, &chain_s->prios_list) {
		struct prio *p = list_entry(pos, struct prio, list);

		/* exit on first pos that is larger */
		if (prio < p->key.prio || (prio == p->key.prio &&
					   level < p->key.level)) {
			/* Get next level 0 table */
			next_ft = p->key.level == 0 ? p->ft : p->next_ft;
			break;
		}
	}

	ft = mlx5_chains_create_table(chains, chain, prio, level);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_create;
	}

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index,
		 ft->max_fte - 2);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ft->max_fte - 1);
	miss_group = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(miss_group)) {
		err = PTR_ERR(miss_group);
		goto err_group;
	}

	/* Add miss rule to next_ft */
	miss_rule = mlx5_chains_add_miss_rule(chain_s, ft, next_ft);
	if (IS_ERR(miss_rule)) {
		err = PTR_ERR(miss_rule);
		goto err_miss_rule;
	}

	prio_s->miss_group = miss_group;
	prio_s->miss_rule = miss_rule;
	prio_s->next_ft = next_ft;
	prio_s->chain = chain_s;
	prio_s->key.chain = chain;
	prio_s->key.prio = prio;
	prio_s->key.level = level;
	prio_s->ft = ft;

	err = rhashtable_insert_fast(&prios_ht(chains), &prio_s->node,
				     prio_params);
	if (err)
		goto err_insert;

	list_add(&prio_s->list, pos->prev);

	/* Table is ready, connect it */
	err = mlx5_chains_update_prio_prevs(prio_s, ft);
	if (err)
		goto err_update;

	kvfree(flow_group_in);
	return prio_s;

err_update:
	list_del(&prio_s->list);
	rhashtable_remove_fast(&prios_ht(chains), &prio_s->node,
			       prio_params);
err_insert:
	mlx5_del_flow_rules(miss_rule);
err_miss_rule:
	mlx5_destroy_flow_group(miss_group);
err_group:
	mlx5_destroy_flow_table(ft);
err_create:
err_alloc:
	kvfree(prio_s);
	kvfree(flow_group_in);
	mlx5_chains_put_chain(chain_s);
	return ERR_PTR(err);
}

static void
mlx5_chains_destroy_prio(struct mlx5_fs_chains *chains,
			 struct prio *prio)
{
	struct fs_chain *chain = prio->chain;

	WARN_ON(mlx5_chains_update_prio_prevs(prio,
					      prio->next_ft));

	list_del(&prio->list);
	rhashtable_remove_fast(&prios_ht(chains), &prio->node,
			       prio_params);
	mlx5_del_flow_rules(prio->miss_rule);
	mlx5_destroy_flow_group(prio->miss_group);
	mlx5_destroy_flow_table(prio->ft);
	mlx5_chains_put_chain(chain);
	kvfree(prio);
}

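/* Get (or create) the flow table for (chain, prio, level) and take a
 * reference on it. A minimal usage sketch (illustrative only, variable
 * names and error handling are the caller's):
 *
 *	ft = mlx5_chains_get_table(chains, chain, prio, 0);
 *	if (IS_ERR(ft))
 *		return PTR_ERR(ft);
 *	rule = mlx5_add_flow_rules(ft, spec, &act, &dest, 1);
 *	...
 *	mlx5_chains_put_table(chains, chain, prio, 0);
 */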
struct mlx5_flow_table *
mlx5_chains_get_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
		      u32 level)
{
	struct mlx5_flow_table *prev_fts;
	struct prio *prio_s;
	struct prio_key key;
	int l = 0;

	if ((chain > mlx5_chains_get_chain_range(chains) &&
	     chain != mlx5_chains_get_nf_ft_chain(chains)) ||
	    prio > mlx5_chains_get_prio_range(chains) ||
	    level > mlx5_chains_get_level_range(chains))
		return ERR_PTR(-EOPNOTSUPP);

	/* create earlier levels for correct fs_core lookup when
	 * connecting tables.
	 */
	for (l = 0; l < level; l++) {
		prev_fts = mlx5_chains_get_table(chains, chain, prio, l);
		if (IS_ERR(prev_fts)) {
			prio_s = ERR_CAST(prev_fts);
			goto err_get_prevs;
		}
	}

	key.chain = chain;
	key.prio = prio;
	key.level = level;

	mutex_lock(&chains_lock(chains));
	prio_s = rhashtable_lookup_fast(&prios_ht(chains), &key,
					prio_params);
	if (!prio_s) {
		prio_s = mlx5_chains_create_prio(chains, chain,
						 prio, level);
		if (IS_ERR(prio_s))
			goto err_create_prio;
	}

	++prio_s->ref;
	mutex_unlock(&chains_lock(chains));

	return prio_s->ft;

err_create_prio:
	mutex_unlock(&chains_lock(chains));
err_get_prevs:
	while (--l >= 0)
		mlx5_chains_put_table(chains, chain, prio, l);
	return ERR_CAST(prio_s);
}

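/* Drop a reference taken by mlx5_chains_get_table(). The prio table (and
 * the lower levels implicitly created with it) is destroyed once the last
 * reference is gone.
 */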
void
mlx5_chains_put_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
		      u32 level)
{
	struct prio *prio_s;
	struct prio_key key;

	key.chain = chain;
	key.prio = prio;
	key.level = level;

	mutex_lock(&chains_lock(chains));
	prio_s = rhashtable_lookup_fast(&prios_ht(chains), &key,
					prio_params);
	if (!prio_s)
		goto err_get_prio;

	if (--prio_s->ref == 0)
		mlx5_chains_destroy_prio(chains, prio_s);
	mutex_unlock(&chains_lock(chains));

	while (level-- > 0)
		mlx5_chains_put_table(chains, chain, prio, level);

	return;

err_get_prio:
	mutex_unlock(&chains_lock(chains));
	WARN_ONCE(1,
		  "Couldn't find table: (chain: %d prio: %d level: %d)",
		  chain, prio, level);
}

struct mlx5_flow_table *
mlx5_chains_get_tc_end_ft(struct mlx5_fs_chains *chains)
{
	return chains_end_ft(chains);
}

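/* Create a standalone, unmanaged chains table at the top of the
 * chain/prio/level ranges. Requires ignore_flow_level, since the caller is
 * expected to connect it with its own explicit rules.
 */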
struct mlx5_flow_table *
mlx5_chains_create_global_table(struct mlx5_fs_chains *chains)
{
	u32 chain, prio, level;
	int err;

	if (!mlx5_chains_ignore_flow_level_supported(chains)) {
		err = -EOPNOTSUPP;

		mlx5_core_warn(chains->dev,
			       "Couldn't create global flow table, ignore_flow_level not supported.");
		goto err_ignore;
	}

	chain = mlx5_chains_get_chain_range(chains);
	prio = mlx5_chains_get_prio_range(chains);
	level = mlx5_chains_get_level_range(chains);

	return mlx5_chains_create_table(chains, chain, prio, level);

err_ignore:
	return ERR_PTR(err);
}

void
mlx5_chains_destroy_global_table(struct mlx5_fs_chains *chains,
				 struct mlx5_flow_table *ft)
{
	mlx5_destroy_flow_table(ft);
}

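/* Allocate the chains context from the attributes (namespace, flags, group
 * budget, base prio/level, default/end table, mapping context) and
 * initialize the chain and prio hashtables and their lock.
 */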
static struct mlx5_fs_chains *
mlx5_chains_init(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr)
{
	struct mlx5_fs_chains *chains;
	int err;

	chains = kzalloc(sizeof(*chains), GFP_KERNEL);
	if (!chains)
		return ERR_PTR(-ENOMEM);

	chains->dev = dev;
	chains->flags = attr->flags;
	chains->ns = attr->ns;
	chains->group_num = attr->max_grp_num;
	chains->chains_mapping = attr->mapping;
	chains->fs_base_prio = attr->fs_base_prio;
	chains->fs_base_level = attr->fs_base_level;
	chains_default_ft(chains) = chains_end_ft(chains) = attr->default_ft;

	err = rhashtable_init(&chains_ht(chains), &chain_params);
	if (err)
		goto init_chains_ht_err;

	err = rhashtable_init(&prios_ht(chains), &prio_params);
	if (err)
		goto init_prios_ht_err;

	mutex_init(&chains_lock(chains));

	return chains;

init_prios_ht_err:
	rhashtable_destroy(&chains_ht(chains));
init_chains_ht_err:
	kfree(chains);
	return ERR_PTR(err);
}

static void
mlx5_chains_cleanup(struct mlx5_fs_chains *chains)
{
	mutex_destroy(&chains_lock(chains));
	rhashtable_destroy(&prios_ht(chains));
	rhashtable_destroy(&chains_ht(chains));

	kfree(chains);
}

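/* Create a chains context. A minimal setup sketch (values are illustrative,
 * the real attributes depend on the caller's namespace and capabilities):
 *
 *	struct mlx5_chains_attr attr = {};
 *
 *	attr.ns = MLX5_FLOW_NAMESPACE_FDB;
 *	attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED;
 *	attr.max_grp_num = 4;
 *	attr.default_ft = miss_ft;	(caller-provided miss table)
 *	chains = mlx5_chains_create(dev, &attr);
 *	...
 *	mlx5_chains_destroy(chains);
 */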
struct mlx5_fs_chains *
mlx5_chains_create(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr)
{
	struct mlx5_fs_chains *chains;

	chains = mlx5_chains_init(dev, attr);

	return chains;
}

void
mlx5_chains_destroy(struct mlx5_fs_chains *chains)
{
	mlx5_chains_cleanup(chains);
}

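/* Map a chain number to a compact id that fits in the chain register. The
 * id is released with mlx5_chains_put_chain_mapping(). Sketch (names are
 * illustrative):
 *
 *	err = mlx5_chains_get_chain_mapping(chains, chain, &chain_mapping);
 *	if (err)
 *		return err;
 *	...
 *	mlx5_chains_put_chain_mapping(chains, chain_mapping);
 */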
int
mlx5_chains_get_chain_mapping(struct mlx5_fs_chains *chains, u32 chain,
			      u32 *chain_mapping)
{
	struct mapping_ctx *ctx = chains->chains_mapping;
	struct mlx5_mapped_obj mapped_obj = {};

	mapped_obj.type = MLX5_MAPPED_OBJ_CHAIN;
	mapped_obj.chain = chain;
	return mapping_add(ctx, &mapped_obj, chain_mapping);
}

int
mlx5_chains_put_chain_mapping(struct mlx5_fs_chains *chains, u32 chain_mapping)
{
	struct mapping_ctx *ctx = chains->chains_mapping;

	return mapping_remove(ctx, chain_mapping);
}

void
mlx5_chains_print_info(struct mlx5_fs_chains *chains)
{
	mlx5_core_dbg(chains->dev, "Flow table chains groups(%d)\n", chains->group_num);
}