/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __MLX5_EN_TC_H__
#define __MLX5_EN_TC_H__

#include <net/pkt_cls.h>
#include "en.h"
#include "eswitch.h"
#include "en/tc_ct.h"
#include "en/tc_tun.h"
#include "en/tc/int_port.h"
#include "en/tc/meter.h"
#include "en_rep.h"

#define MLX5E_TC_FLOW_ID_MASK 0x0000ffff

#ifdef CONFIG_MLX5_ESWITCH

#define NIC_FLOW_ATTR_SZ (sizeof(struct mlx5_flow_attr) +\
			  sizeof(struct mlx5_nic_flow_attr))
#define ESW_FLOW_ATTR_SZ (sizeof(struct mlx5_flow_attr) +\
			  sizeof(struct mlx5_esw_flow_attr))
#define ns_to_attr_sz(ns) (((ns) == MLX5_FLOW_NAMESPACE_FDB) ?\
			    ESW_FLOW_ATTR_SZ :\
			    NIC_FLOW_ATTR_SZ)

struct mlx5_fs_chains *mlx5e_nic_chains(struct mlx5e_tc_table *tc);
int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags);

struct mlx5e_tc_update_priv {
	struct net_device *fwd_dev;
	bool skb_done;
	bool forward_tx;
};

struct mlx5_nic_flow_attr {
	u32 flow_tag;
	u32 hairpin_tirn;
	struct mlx5_flow_table *hairpin_ft;
};

struct mlx5_flow_attr {
	u32 action;
	unsigned long tc_act_cookies[TCA_ACT_MAX_PRIO];
	struct mlx5_fc *counter;
	struct mlx5_modify_hdr *modify_hdr;
	struct mlx5e_mod_hdr_handle *mh; /* attached mod header instance */
	struct mlx5e_mod_hdr_handle *slow_mh; /* attached mod header instance for slow path */
	struct mlx5_ct_attr ct_attr;
	struct mlx5e_sample_attr sample_attr;
	struct mlx5e_meter_attr meter_attr;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	u32 chain;
	u16 prio;
	u16 tc_act_cookies_count;
	u32 dest_chain;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_table *dest_ft;
	struct mlx5_flow_table *extra_split_ft;
	u8 inner_match_level;
	u8 outer_match_level;
	u8 tun_ip_version;
	int tunnel_id; /* mapped tunnel id */
	u32 flags;
	u32 exe_aso_type;
	struct list_head list;
	struct mlx5e_post_act_handle *post_act_handle;
	struct mlx5_flow_attr *branch_true;
	struct mlx5_flow_attr *branch_false;
	struct mlx5_flow_attr *jumping_attr;
	struct mlx5_flow_handle *act_id_restore_rule;
	/* keep this union last */
	union {
		DECLARE_FLEX_ARRAY(struct mlx5_esw_flow_attr, esw_attr);
		DECLARE_FLEX_ARRAY(struct mlx5_nic_flow_attr, nic_attr);
	};
};
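
/* Illustrative sketch only, not part of the driver API: a flow attribute is
 * allocated with a namespace-dependent size (ns_to_attr_sz() above), so FDB
 * flows get room for the esw_attr tail of the union and NIC flows for the
 * nic_attr tail. This hypothetical helper mirrors what mlx5_alloc_flow_attr()
 * is expected to do and assumes <linux/slab.h> is reachable via the includes
 * above.
 */
static inline struct mlx5_flow_attr *
mlx5e_flow_attr_alloc_example(enum mlx5_flow_namespace_type ns)
{
	/* zero-initialized so the trailing union starts out clean */
	return kzalloc(ns_to_attr_sz(ns), GFP_KERNEL);
}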

enum {
	MLX5_ATTR_FLAG_VLAN_HANDLED  = BIT(0),
	MLX5_ATTR_FLAG_SLOW_PATH     = BIT(1),
	MLX5_ATTR_FLAG_NO_IN_PORT    = BIT(2),
	MLX5_ATTR_FLAG_SRC_REWRITE   = BIT(3),
	MLX5_ATTR_FLAG_SAMPLE        = BIT(4),
	MLX5_ATTR_FLAG_ACCEPT        = BIT(5),
	MLX5_ATTR_FLAG_CT            = BIT(6),
	MLX5_ATTR_FLAG_TERMINATING   = BIT(7),
	MLX5_ATTR_FLAG_MTU           = BIT(8),
};

/* Returns true if any of the flags that require skipping further TC/NF processing are set. */
static inline bool
mlx5e_tc_attr_flags_skip(u32 attr_flags)
{
	return attr_flags & (MLX5_ATTR_FLAG_SLOW_PATH | MLX5_ATTR_FLAG_ACCEPT);
}

struct mlx5_rx_tun_attr {
	u16 decap_vport;
	union {
		__be32 v4;
		struct in6_addr v6;
	} src_ip; /* Valid if decap_vport is not zero */
	union {
		__be32 v4;
		struct in6_addr v6;
	} dst_ip; /* Valid if decap_vport is not zero */
};

#define MLX5E_TC_TABLE_CHAIN_TAG_BITS 16
#define MLX5E_TC_TABLE_CHAIN_TAG_MASK GENMASK(MLX5E_TC_TABLE_CHAIN_TAG_BITS - 1, 0)

#define MLX5E_TC_MAX_INT_PORT_NUM (32)

#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)

struct tunnel_match_key {
	struct flow_dissector_key_control enc_control;
	struct flow_dissector_key_keyid enc_key_id;
	struct flow_dissector_key_ports enc_tp;
	struct flow_dissector_key_ip enc_ip;
	union {
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};

	int filter_ifindex;
};

struct tunnel_match_enc_opts {
	struct flow_dissector_key_enc_opts key;
	struct flow_dissector_key_enc_opts mask;
};

/* A mapped tunnel_id is TUNNEL_INFO_BITS + ENC_OPTS_BITS wide:
 * the upper TUNNEL_INFO_BITS carry the general tunnel info mapping,
 * the lower ENC_OPTS_BITS carry the enc_opts mapping.
 */
#define TUNNEL_INFO_BITS 12
#define TUNNEL_INFO_BITS_MASK GENMASK(TUNNEL_INFO_BITS - 1, 0)
#define ENC_OPTS_BITS 11
#define ENC_OPTS_BITS_MASK GENMASK(ENC_OPTS_BITS - 1, 0)
#define TUNNEL_ID_BITS (TUNNEL_INFO_BITS + ENC_OPTS_BITS)
#define TUNNEL_ID_MASK GENMASK(TUNNEL_ID_BITS - 1, 0)
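
/* Illustrative sketch only (hypothetical helpers, not part of the driver API):
 * composing and splitting a mapped tunnel_id according to the layout above.
 */
static inline u32 mlx5e_tc_tunnel_id_pack_example(u32 tun_info_id, u32 enc_opts_id)
{
	return ((tun_info_id & TUNNEL_INFO_BITS_MASK) << ENC_OPTS_BITS) |
	       (enc_opts_id & ENC_OPTS_BITS_MASK);
}

static inline void mlx5e_tc_tunnel_id_unpack_example(u32 tunnel_id,
						      u32 *tun_info_id,
						      u32 *enc_opts_id)
{
	*tun_info_id = (tunnel_id >> ENC_OPTS_BITS) & TUNNEL_INFO_BITS_MASK;
	*enc_opts_id = tunnel_id & ENC_OPTS_BITS_MASK;
}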

enum {
	MLX5E_TC_FLAG_INGRESS_BIT,
	MLX5E_TC_FLAG_EGRESS_BIT,
	MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
	MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
	MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
	MLX5E_TC_FLAG_LAST_EXPORTED_BIT = MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
};

#define MLX5_TC_FLAG(flag) BIT(MLX5E_TC_FLAG_##flag##_BIT)

int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv);
void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv);

int mlx5e_tc_ht_init(struct rhashtable *tc_ht);
void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht);

int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
			   struct flow_cls_offload *f, unsigned long flags);
int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
			struct flow_cls_offload *f, unsigned long flags);

int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
		       struct flow_cls_offload *f, unsigned long flags);
int mlx5e_tc_fill_action_stats(struct mlx5e_priv *priv,
			       struct flow_offload_action *fl_act);

int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
				struct tc_cls_matchall_offload *f);
int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
			     struct tc_cls_matchall_offload *f);

struct mlx5e_encap_entry;
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e,
			      struct list_head *flow_list);
void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e,
			      struct list_head *flow_list);
bool mlx5e_encap_take(struct mlx5e_encap_entry *e);
void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e);

void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *flow_list);
void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list);

struct mlx5e_neigh_hash_entry;
struct mlx5e_encap_entry *
mlx5e_get_next_init_encap(struct mlx5e_neigh_hash_entry *nhe,
			  struct mlx5e_encap_entry *e);
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe);

void mlx5e_tc_reoffload_flows_work(struct work_struct *work);

enum mlx5e_tc_attr_to_reg {
	MAPPED_OBJ_TO_REG,
	VPORT_TO_REG,
	TUNNEL_TO_REG,
	CTSTATE_TO_REG,
	ZONE_TO_REG,
	ZONE_RESTORE_TO_REG,
	MARK_TO_REG,
	LABELS_TO_REG,
	FTEID_TO_REG,
	NIC_MAPPED_OBJ_TO_REG,
	NIC_ZONE_RESTORE_TO_REG,
	PACKET_COLOR_TO_REG,
};

struct mlx5e_tc_attr_to_reg_mapping {
	int mfield; /* rewrite field */
	int moffset; /* bit offset of mfield */
	int mlen; /* bits to rewrite/match */

	int soffset; /* byte offset of spec for match */
};

extern struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[];

#define MLX5_REG_MAPPING_MOFFSET(reg_id) (mlx5e_tc_attr_to_reg_mappings[reg_id].moffset)
#define MLX5_REG_MAPPING_MBITS(reg_id) (mlx5e_tc_attr_to_reg_mappings[reg_id].mlen)
#define MLX5_REG_MAPPING_MASK(reg_id) (GENMASK(mlx5e_tc_attr_to_reg_mappings[reg_id].mlen - 1, 0))

bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
				    struct net_device *out_dev);

int mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
			      struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
			      enum mlx5_flow_namespace_type ns,
			      enum mlx5e_tc_attr_to_reg type,
			      u32 data);

void mlx5e_tc_match_to_reg_mod_hdr_change(struct mlx5_core_dev *mdev,
					  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
					  enum mlx5e_tc_attr_to_reg type,
					  int act_id, u32 data);

void mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
				 enum mlx5e_tc_attr_to_reg type,
				 u32 data,
				 u32 mask);
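
/* Illustrative usage sketch only (hypothetical helper, not part of the driver
 * API): match packets whose ZONE_TO_REG register equals a given CT zone, using
 * the register mapping table and mask macro above.
 */
static inline void
mlx5e_tc_match_zone_example(struct mlx5_flow_spec *spec, u16 zone)
{
	mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, zone,
				    MLX5_REG_MAPPING_MASK(ZONE_TO_REG));
}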

void mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec,
				     enum mlx5e_tc_attr_to_reg type,
				     u32 *data,
				     u32 *mask);

int mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev,
					 struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
					 enum mlx5_flow_namespace_type ns,
					 enum mlx5e_tc_attr_to_reg type,
					 u32 data);

int mlx5e_tc_attach_mod_hdr(struct mlx5e_priv *priv,
			    struct mlx5e_tc_flow *flow,
			    struct mlx5_flow_attr *attr);

void mlx5e_tc_detach_mod_hdr(struct mlx5e_priv *priv,
			     struct mlx5e_tc_flow *flow,
			     struct mlx5_flow_attr *attr);

void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev,
			    struct flow_match_basic *match, bool outer,
			    void *headers_c, void *headers_v);

int mlx5e_tc_nic_init(struct mlx5e_priv *priv);
void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv);

int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
			    void *cb_priv);

struct mlx5_flow_handle *
mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct mlx5_flow_attr *attr);
void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
				  struct mlx5_flow_handle *rule,
				  struct mlx5_flow_attr *attr);

struct mlx5_flow_handle *
mlx5_tc_rule_insert(struct mlx5e_priv *priv,
		    struct mlx5_flow_spec *spec,
		    struct mlx5_flow_attr *attr);
void
mlx5_tc_rule_delete(struct mlx5e_priv *priv,
		    struct mlx5_flow_handle *rule,
		    struct mlx5_flow_attr *attr);

bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_dev);
int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev,
			       u16 *vport);

int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv,
				      struct mlx5_flow_attr *attr,
				      int ifindex,
				      enum mlx5e_tc_int_port_type type,
				      u32 *action,
				      int out_index);
#else /* CONFIG_MLX5_CLS_ACT */
static inline int  mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
static inline int mlx5e_tc_ht_init(struct rhashtable *tc_ht) { return 0; }
static inline void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht) {}
static inline int
mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{ return -EOPNOTSUPP; }

#endif /* CONFIG_MLX5_CLS_ACT */

struct mlx5_flow_attr *mlx5_alloc_flow_attr(enum mlx5_flow_namespace_type type);

struct mlx5_flow_handle *
mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct mlx5_flow_attr *attr);
void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
				  struct mlx5_flow_handle *rule,
				  struct mlx5_flow_attr *attr);
#else /* CONFIG_MLX5_ESWITCH */
static inline int  mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
static inline int  mlx5e_tc_num_filters(struct mlx5e_priv *priv,
					unsigned long flags)
{
	return 0;
}

static inline int
mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{ return -EOPNOTSUPP; }
#endif

#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
struct mlx5e_tc_table *mlx5e_tc_table_alloc(void);
void mlx5e_tc_table_free(struct mlx5e_tc_table *tc);
static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
{
	u32 chain, reg_b;

	reg_b = be32_to_cpu(cqe->ft_metadata);

	if (reg_b >> (MLX5E_TC_TABLE_CHAIN_TAG_BITS + ESW_ZONE_ID_BITS))
		return false;

	chain = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;
	if (chain)
		return true;

	return false;
}

bool mlx5e_tc_update_skb_nic(struct mlx5_cqe64 *cqe, struct sk_buff *skb);
bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb,
			 struct mapping_ctx *mapping_ctx, u32 mapped_obj_id,
			 struct mlx5_tc_ct_priv *ct_priv,
			 u32 zone_restore_id, u32 tunnel_id,
			 struct mlx5e_tc_update_priv *tc_priv);
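
/* Illustrative RX-path sketch only (hypothetical helper): a receive handler is
 * expected to first check the CQE for a restored chain tag and only then
 * restore TC metadata onto the skb; on failure the skb is assumed to be
 * dropped by the caller.
 */
static inline bool
mlx5e_rx_restore_tc_example(struct mlx5_cqe64 *cqe, struct sk_buff *skb)
{
	if (!mlx5e_cqe_regb_chain(cqe))
		return true; /* nothing to restore */

	return mlx5e_tc_update_skb_nic(cqe, skb);
}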
#else /* CONFIG_MLX5_CLS_ACT */
static inline struct mlx5e_tc_table *mlx5e_tc_table_alloc(void) { return NULL; }
static inline void mlx5e_tc_table_free(struct mlx5e_tc_table *tc) {}
static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
{ return false; }
static inline bool
mlx5e_tc_update_skb_nic(struct mlx5_cqe64 *cqe, struct sk_buff *skb)
{ return true; }
#endif

int mlx5e_tc_action_miss_mapping_get(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr,
				     u64 act_miss_cookie, u32 *act_miss_mapping);
void mlx5e_tc_action_miss_mapping_put(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr,
				      u32 act_miss_mapping);

#endif /* __MLX5_EN_TC_H__ */