// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021 Mellanox Technologies. */

#include <linux/skbuff.h>
#include <net/psample.h>
#include "en/mapping.h"
#include "en/tc/post_act.h"
#include "en/tc/act/sample.h"
#include "en/mod_hdr.h"
#include "sample.h"
#include "eswitch.h"
#include "en_tc.h"
#include "fs_core.h"

#define MLX5_ESW_VPORT_TBL_SIZE_SAMPLE (64 * 1024)

static struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_sample_ns = {
        .max_fte = MLX5_ESW_VPORT_TBL_SIZE_SAMPLE,
        .max_num_groups = 0,    /* default num of groups */
        .flags = 0,
};

struct mlx5e_tc_psample {
        struct mlx5_eswitch *esw;
        struct mlx5_flow_table *termtbl;
        struct mlx5_flow_handle *termtbl_rule;
        DECLARE_HASHTABLE(hashtbl, 8);
        struct mutex ht_lock; /* protect hashtbl */
        DECLARE_HASHTABLE(restore_hashtbl, 8);
        struct mutex restore_lock; /* protect restore_hashtbl */
        struct mlx5e_post_act *post_act;
};

struct mlx5e_sampler {
        struct hlist_node hlist;
        u32 sampler_id;
        u32 sample_ratio;
        u32 sample_table_id;
        u32 default_table_id;
        int count;
};

struct mlx5e_sample_flow {
        struct mlx5e_sampler *sampler;
        struct mlx5e_sample_restore *restore;
        struct mlx5_flow_attr *pre_attr;
        struct mlx5_flow_handle *pre_rule;
        struct mlx5_flow_attr *post_attr;
        struct mlx5_flow_handle *post_rule;
};

struct mlx5e_sample_restore {
        struct hlist_node hlist;
        struct mlx5_modify_hdr *modify_hdr;
        struct mlx5_flow_handle *rule;
        u32 obj_id;
        int count;
};

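/* Create a one-rule termination table that forwards sampled packets to the
 * eswitch manager vport. All samplers share this table as their sample
 * destination.
 */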
static int
sampler_termtbl_create(struct mlx5e_tc_psample *tc_psample)
{
        struct mlx5_eswitch *esw = tc_psample->esw;
        struct mlx5_flow_table_attr ft_attr = {};
        struct mlx5_flow_destination dest = {};
        struct mlx5_core_dev *dev = esw->dev;
        struct mlx5_flow_namespace *root_ns;
        struct mlx5_flow_act act = {};
        int err;

        if (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, termination_table)) {
                mlx5_core_warn(dev, "termination table is not supported\n");
                return -EOPNOTSUPP;
        }

        root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
        if (!root_ns) {
                mlx5_core_warn(dev, "failed to get FDB flow namespace\n");
                return -EOPNOTSUPP;
        }

        ft_attr.flags = MLX5_FLOW_TABLE_TERMINATION | MLX5_FLOW_TABLE_UNMANAGED;
        ft_attr.autogroup.max_num_groups = 1;
        ft_attr.prio = FDB_SLOW_PATH;
        ft_attr.max_fte = 1;
        ft_attr.level = 1;
        tc_psample->termtbl = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
        if (IS_ERR(tc_psample->termtbl)) {
                err = PTR_ERR(tc_psample->termtbl);
                mlx5_core_warn(dev, "failed to create termtbl, err: %d\n", err);
                return err;
        }

        act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        dest.vport.num = esw->manager_vport;
        dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
        tc_psample->termtbl_rule = mlx5_add_flow_rules(tc_psample->termtbl, NULL, &act, &dest, 1);
        if (IS_ERR(tc_psample->termtbl_rule)) {
                err = PTR_ERR(tc_psample->termtbl_rule);
                mlx5_core_warn(dev, "failed to create termtbl rule, err: %d\n", err);
                mlx5_destroy_flow_table(tc_psample->termtbl);
                return err;
        }

        return 0;
}

static void
sampler_termtbl_destroy(struct mlx5e_tc_psample *tc_psample)
{
        mlx5_del_flow_rules(tc_psample->termtbl_rule);
        mlx5_destroy_flow_table(tc_psample->termtbl);
}

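/* Create the hardware flow sampler general object: packets hitting it are
 * sampled to sample_table_id at the configured sample_ratio, while all
 * packets continue to default_table_id.
 */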
static int
sampler_obj_create(struct mlx5_core_dev *mdev, struct mlx5e_sampler *sampler)
{
        u32 in[MLX5_ST_SZ_DW(create_sampler_obj_in)] = {};
        u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
        u64 general_obj_types;
        void *obj;
        int err;

        general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
        if (!(general_obj_types & MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_SAMPLER))
                return -EOPNOTSUPP;
        if (!MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, ignore_flow_level))
                return -EOPNOTSUPP;

        obj = MLX5_ADDR_OF(create_sampler_obj_in, in, sampler_object);
        MLX5_SET(sampler_obj, obj, table_type, FS_FT_FDB);
        MLX5_SET(sampler_obj, obj, ignore_flow_level, 1);
        MLX5_SET(sampler_obj, obj, level, 1);
        MLX5_SET(sampler_obj, obj, sample_ratio, sampler->sample_ratio);
        MLX5_SET(sampler_obj, obj, sample_table_id, sampler->sample_table_id);
        MLX5_SET(sampler_obj, obj, default_table_id, sampler->default_table_id);
        MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
        MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_SAMPLER);

        err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
        if (!err)
                sampler->sampler_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

        return err;
}

static void
sampler_obj_destroy(struct mlx5_core_dev *mdev, u32 sampler_id)
{
        u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
        u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];

        MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
        MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_SAMPLER);
        MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sampler_id);

        mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}

static u32
sampler_hash(u32 sample_ratio, u32 default_table_id)
{
        return jhash_2words(sample_ratio, default_table_id, 0);
}

static int
sampler_cmp(u32 sample_ratio1, u32 default_table_id1, u32 sample_ratio2, u32 default_table_id2)
{
        return sample_ratio1 != sample_ratio2 || default_table_id1 != default_table_id2;
}

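/* Get a sampler for the given (sample_ratio, default_table_id) pair. Samplers
 * are shared and refcounted: reuse a matching entry from the hash table if
 * one exists, otherwise create a new HW sampler object and insert it.
 */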
static struct mlx5e_sampler *
sampler_get(struct mlx5e_tc_psample *tc_psample, u32 sample_ratio, u32 default_table_id)
{
        struct mlx5e_sampler *sampler;
        u32 hash_key;
        int err;

        mutex_lock(&tc_psample->ht_lock);
        hash_key = sampler_hash(sample_ratio, default_table_id);
        hash_for_each_possible(tc_psample->hashtbl, sampler, hlist, hash_key)
                if (!sampler_cmp(sampler->sample_ratio, sampler->default_table_id,
                                 sample_ratio, default_table_id))
                        goto add_ref;

        sampler = kzalloc(sizeof(*sampler), GFP_KERNEL);
        if (!sampler) {
                err = -ENOMEM;
                goto err_alloc;
        }

        sampler->sample_table_id = tc_psample->termtbl->id;
        sampler->default_table_id = default_table_id;
        sampler->sample_ratio = sample_ratio;

        err = sampler_obj_create(tc_psample->esw->dev, sampler);
        if (err)
                goto err_create;

        hash_add(tc_psample->hashtbl, &sampler->hlist, hash_key);

add_ref:
        sampler->count++;
        mutex_unlock(&tc_psample->ht_lock);
        return sampler;

err_create:
        kfree(sampler);
err_alloc:
        mutex_unlock(&tc_psample->ht_lock);
        return ERR_PTR(err);
}

static void
sampler_put(struct mlx5e_tc_psample *tc_psample, struct mlx5e_sampler *sampler)
{
        mutex_lock(&tc_psample->ht_lock);
        if (--sampler->count == 0) {
                hash_del(&sampler->hlist);
                sampler_obj_destroy(tc_psample->esw->dev, sampler->sampler_id);
                kfree(sampler);
        }
        mutex_unlock(&tc_psample->ht_lock);
}

/* obj_id is used to restore the sample parameters.
 * Set fte_id in the original flow table, then match it in the default table.
 * Only do so for NICs that can preserve reg_c, or when a decap action is
 * present. For other cases, repeat the original match in the default table.
 * Use one header rewrite for both obj_id and fte_id.
 */
static struct mlx5_modify_hdr *
sample_modify_hdr_get(struct mlx5_core_dev *mdev, u32 obj_id,
                      struct mlx5e_tc_mod_hdr_acts *mod_acts)
{
        struct mlx5_modify_hdr *modify_hdr;
        int err;

        err = mlx5e_tc_match_to_reg_set(mdev, mod_acts, MLX5_FLOW_NAMESPACE_FDB,
                                        MAPPED_OBJ_TO_REG, obj_id);
        if (err)
                goto err_set_regc0;

        modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_FDB,
                                              mod_acts->num_actions,
                                              mod_acts->actions);
        if (IS_ERR(modify_hdr)) {
                err = PTR_ERR(modify_hdr);
                goto err_modify_hdr;
        }

        mlx5e_mod_hdr_dealloc(mod_acts);
        return modify_hdr;

err_modify_hdr:
        mlx5e_mod_hdr_dealloc(mod_acts);
err_set_regc0:
        return ERR_PTR(err);
}

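/* Get a refcounted restore context for obj_id: a modify header that writes
 * obj_id into reg_c0, and a rule in the eswitch restore table for obj_id, so
 * that sampled packets can be matched back to their sample parameters.
 */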
static struct mlx5e_sample_restore *
sample_restore_get(struct mlx5e_tc_psample *tc_psample, u32 obj_id,
                   struct mlx5e_tc_mod_hdr_acts *mod_acts)
{
        struct mlx5_eswitch *esw = tc_psample->esw;
        struct mlx5_core_dev *mdev = esw->dev;
        struct mlx5e_sample_restore *restore;
        struct mlx5_modify_hdr *modify_hdr;
        int err;

        mutex_lock(&tc_psample->restore_lock);
        hash_for_each_possible(tc_psample->restore_hashtbl, restore, hlist, obj_id)
                if (restore->obj_id == obj_id)
                        goto add_ref;

        restore = kzalloc(sizeof(*restore), GFP_KERNEL);
        if (!restore) {
                err = -ENOMEM;
                goto err_alloc;
        }
        restore->obj_id = obj_id;

        modify_hdr = sample_modify_hdr_get(mdev, obj_id, mod_acts);
        if (IS_ERR(modify_hdr)) {
                err = PTR_ERR(modify_hdr);
                goto err_modify_hdr;
        }
        restore->modify_hdr = modify_hdr;

        restore->rule = esw_add_restore_rule(esw, obj_id);
        if (IS_ERR(restore->rule)) {
                err = PTR_ERR(restore->rule);
                goto err_restore;
        }

        hash_add(tc_psample->restore_hashtbl, &restore->hlist, obj_id);
add_ref:
        restore->count++;
        mutex_unlock(&tc_psample->restore_lock);
        return restore;

err_restore:
        mlx5_modify_header_dealloc(mdev, restore->modify_hdr);
err_modify_hdr:
        kfree(restore);
err_alloc:
        mutex_unlock(&tc_psample->restore_lock);
        return ERR_PTR(err);
}

static void
sample_restore_put(struct mlx5e_tc_psample *tc_psample, struct mlx5e_sample_restore *restore)
{
        mutex_lock(&tc_psample->restore_lock);
        if (--restore->count == 0)
                hash_del(&restore->hlist);
        mutex_unlock(&tc_psample->restore_lock);

        if (!restore->count) {
                mlx5_del_flow_rules(restore->rule);
                mlx5_modify_header_dealloc(tc_psample->esw->dev, restore->modify_hdr);
                kfree(restore);
        }
}

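/* Deliver a received sample to userspace through psample, using the group id,
 * rate and truncation size recovered from the reg_c0 mapped object.
 */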
void mlx5e_tc_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj)
{
        u32 trunc_size = mapped_obj->sample.trunc_size;
        struct psample_group psample_group = {};
        struct psample_metadata md = {};

        md.trunc_size = trunc_size ? min(trunc_size, skb->len) : skb->len;
        md.in_ifindex = skb->dev->ifindex;
        psample_group.group_num = mapped_obj->sample.group_id;
        psample_group.net = &init_net;
        skb_push(skb, skb->mac_len);

        psample_sample_packet(&psample_group, skb, mapped_obj->sample.rate, &md);
}

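/* Allocate the per vport default table and add the post rule to it: the
 * original match with all actions except the sample action.
 */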
static int
add_post_rule(struct mlx5_eswitch *esw, struct mlx5e_sample_flow *sample_flow,
              struct mlx5_flow_spec *spec, struct mlx5_flow_attr *attr,
              u32 *default_tbl_id)
{
        struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
        u32 attr_sz = ns_to_attr_sz(MLX5_FLOW_NAMESPACE_FDB);
        struct mlx5_vport_tbl_attr per_vport_tbl_attr;
        struct mlx5_flow_table *default_tbl;
        struct mlx5_flow_attr *post_attr;
        int err;

        /* Allocate a default table per vport, chain and prio. Otherwise,
         * there is only one default table for the same sampler object, and
         * rules with different prio and chain may overlap. For the CT sample
         * action, a per vport default table is needed to restore the metadata.
         */
        per_vport_tbl_attr.chain = attr->chain;
        per_vport_tbl_attr.prio = attr->prio;
        per_vport_tbl_attr.vport = esw_attr->in_rep->vport;
        per_vport_tbl_attr.vport_ns = &mlx5_esw_vport_tbl_sample_ns;
        default_tbl = mlx5_esw_vporttbl_get(esw, &per_vport_tbl_attr);
        if (IS_ERR(default_tbl)) {
                err = PTR_ERR(default_tbl);
                goto err_default_tbl;
        }
        *default_tbl_id = default_tbl->id;

        post_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
        if (!post_attr) {
                err = -ENOMEM;
                goto err_attr;
        }
        sample_flow->post_attr = post_attr;
        memcpy(post_attr, attr, attr_sz);
        /* Perform the original matches on the default table.
         * Offload all actions except the sample action.
         */
        post_attr->chain = 0;
        post_attr->prio = 0;
        post_attr->ft = default_tbl;
        post_attr->flags = MLX5_ATTR_FLAG_NO_IN_PORT;

        /* When offloading sample and encap action, if there is no valid
         * neigh data struct, a slow path rule is offloaded first. Source
         * port metadata match is set at that time. A per vport table is
         * already allocated. No need to match it again. So clear the source
         * port metadata match.
         */
        mlx5_eswitch_clear_rule_source_port(esw, spec);
        sample_flow->post_rule = mlx5_eswitch_add_offloaded_rule(esw, spec, post_attr);
        if (IS_ERR(sample_flow->post_rule)) {
                err = PTR_ERR(sample_flow->post_rule);
                goto err_rule;
        }
        return 0;

err_rule:
        kfree(post_attr);
err_attr:
        mlx5_esw_vporttbl_put(esw, &per_vport_tbl_attr);
err_default_tbl:
        return err;
}


static void
del_post_rule(struct mlx5_eswitch *esw, struct mlx5e_sample_flow *sample_flow,
              struct mlx5_flow_attr *attr)
{
        struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
        struct mlx5_vport_tbl_attr tbl_attr;

        mlx5_eswitch_del_offloaded_rule(esw, sample_flow->post_rule, sample_flow->post_attr);
        kfree(sample_flow->post_attr);
        tbl_attr.chain = attr->chain;
        tbl_attr.prio = attr->prio;
        tbl_attr.vport = esw_attr->in_rep->vport;
        tbl_attr.vport_ns = &mlx5_esw_vport_tbl_sample_ns;
        mlx5_esw_vporttbl_put(esw, &tbl_attr);
}

/* For the following typical flow table:
 *
 * +-------------------------------+
 * +       original flow table     +
 * +-------------------------------+
 * +         original match        +
 * +-------------------------------+
 * + sample action + other actions +
 * +-------------------------------+
 *
 * We translate the tc filter with sample action to the following HW model:
 *
 *         +---------------------+
 *         + original flow table +
 *         +---------------------+
 *         +   original match    +
 *         +---------------------+
 *                    | set fte_id (if reg_c preserve cap)
 *                    | do decap (if required)
 *                    v
 * +------------------------------------------------+
 * +              Flow Sampler Object                +
 * +------------------------------------------------+
 * +                  sample ratio                   +
 * +------------------------------------------------+
 * +    sample table id    |    default table id     +
 * +------------------------------------------------+
 *            |                            |
 *            v                            v
 * +-----------------------------+  +-------------------+
 * +        sample table         +  +   default table   +
 * +-----------------------------+  +-------------------+
 * + forward to management vport +             |
 * +-----------------------------+             |
 *                                      +-------+------+
 *                                      |              |reg_c preserve cap
 *                                      |              |or decap action
 *                                      v              v
 *                        +-----------------+   +-------------+
 *                        + per vport table +   + post action +
 *                        +-----------------+   +-------------+
 *                        + original match  +
 *                        +-----------------+
 *                        + other actions   +
 *                        +-----------------+
 */
struct mlx5_flow_handle *
mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
                        struct mlx5_flow_spec *spec,
                        struct mlx5_flow_attr *attr)
{
        struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
        struct mlx5_esw_flow_attr *pre_esw_attr;
        struct mlx5_mapped_obj restore_obj = {};
        struct mlx5e_tc_mod_hdr_acts *mod_acts;
        struct mlx5e_sample_flow *sample_flow;
        struct mlx5e_sample_attr *sample_attr;
        struct mlx5_flow_attr *pre_attr;
        struct mlx5_eswitch *esw;
        u32 default_tbl_id;
        u32 obj_id;
        int err;

        if (IS_ERR_OR_NULL(tc_psample))
                return ERR_PTR(-EOPNOTSUPP);

        sample_flow = kzalloc(sizeof(*sample_flow), GFP_KERNEL);
        if (!sample_flow)
                return ERR_PTR(-ENOMEM);
        sample_attr = &attr->sample_attr;
        sample_attr->sample_flow = sample_flow;

        /* For NICs with reg_c_preserve support or decap action, use
         * post action instead of the per vport, chain and prio table.
         * Only match the fte id instead of the same match in the
         * original flow table.
         */
        esw = tc_psample->esw;
        if (mlx5e_tc_act_sample_is_multi_table(esw->dev, attr)) {
                struct mlx5_flow_table *ft;

                ft = mlx5e_tc_post_act_get_ft(tc_psample->post_act);
                default_tbl_id = ft->id;
        } else {
                err = add_post_rule(esw, sample_flow, spec, attr, &default_tbl_id);
                if (err)
                        goto err_post_rule;
        }

        /* Create sampler object. */
        sample_flow->sampler = sampler_get(tc_psample, sample_attr->rate, default_tbl_id);
        if (IS_ERR(sample_flow->sampler)) {
                err = PTR_ERR(sample_flow->sampler);
                goto err_sampler;
        }
        sample_attr->sampler_id = sample_flow->sampler->sampler_id;

        /* Create an id mapping reg_c0 value to sample object. */
        restore_obj.type = MLX5_MAPPED_OBJ_SAMPLE;
        restore_obj.sample.group_id = sample_attr->group_num;
        restore_obj.sample.rate = sample_attr->rate;
        restore_obj.sample.trunc_size = sample_attr->trunc_size;
        restore_obj.sample.tunnel_id = attr->tunnel_id;
        err = mapping_add(esw->offloads.reg_c0_obj_pool, &restore_obj, &obj_id);
        if (err)
                goto err_obj_id;
        sample_attr->restore_obj_id = obj_id;

        /* Create sample restore context. */
        mod_acts = &attr->parse_attr->mod_hdr_acts;
        sample_flow->restore = sample_restore_get(tc_psample, obj_id, mod_acts);
        if (IS_ERR(sample_flow->restore)) {
                err = PTR_ERR(sample_flow->restore);
                goto err_sample_restore;
        }

        /* Perform the original matches on the original table. Offload the
         * sample action. The destination is the sampler object.
         */
        pre_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
        if (!pre_attr) {
                err = -ENOMEM;
                goto err_alloc_pre_flow_attr;
        }
        pre_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
        /* For decap action, do decap in the original flow table instead of the
         * default flow table.
         */
        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
                pre_attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
        pre_attr->modify_hdr = sample_flow->restore->modify_hdr;
        pre_attr->flags = MLX5_ATTR_FLAG_SAMPLE;
        pre_attr->inner_match_level = attr->inner_match_level;
        pre_attr->outer_match_level = attr->outer_match_level;
        pre_attr->chain = attr->chain;
        pre_attr->prio = attr->prio;
        pre_attr->ft = attr->ft;
        pre_attr->sample_attr = *sample_attr;
        pre_esw_attr = pre_attr->esw_attr;
        pre_esw_attr->in_mdev = esw_attr->in_mdev;
        pre_esw_attr->in_rep = esw_attr->in_rep;
        sample_flow->pre_rule = mlx5_eswitch_add_offloaded_rule(esw, spec, pre_attr);
        if (IS_ERR(sample_flow->pre_rule)) {
                err = PTR_ERR(sample_flow->pre_rule);
                goto err_pre_offload_rule;
        }
        sample_flow->pre_attr = pre_attr;

        return sample_flow->pre_rule;

err_pre_offload_rule:
        kfree(pre_attr);
err_alloc_pre_flow_attr:
        sample_restore_put(tc_psample, sample_flow->restore);
err_sample_restore:
        mapping_remove(esw->offloads.reg_c0_obj_pool, obj_id);
err_obj_id:
        sampler_put(tc_psample, sample_flow->sampler);
err_sampler:
        if (sample_flow->post_rule)
                del_post_rule(esw, sample_flow, attr);
err_post_rule:
        kfree(sample_flow);
        return ERR_PTR(err);
}

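/* Undo mlx5e_tc_sample_offload(): delete the pre rule, the restore context,
 * the reg_c0 mapping, the sampler and, if it was created, the post rule.
 */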
void
mlx5e_tc_sample_unoffload(struct mlx5e_tc_psample *tc_psample,
                          struct mlx5_flow_handle *rule,
                          struct mlx5_flow_attr *attr)
{
        struct mlx5e_sample_flow *sample_flow;
        struct mlx5_eswitch *esw;

        if (IS_ERR_OR_NULL(tc_psample))
                return;

        /* The following delete order can't be changed; otherwise, we will
         * hit firmware syndromes.
         */
        esw = tc_psample->esw;
        sample_flow = attr->sample_attr.sample_flow;
        mlx5_eswitch_del_offloaded_rule(esw, sample_flow->pre_rule, sample_flow->pre_attr);

        sample_restore_put(tc_psample, sample_flow->restore);
        mapping_remove(esw->offloads.reg_c0_obj_pool, attr->sample_attr.restore_obj_id);
        sampler_put(tc_psample, sample_flow->sampler);
        if (sample_flow->post_rule)
                del_post_rule(esw, sample_flow, attr);

        kfree(sample_flow->pre_attr);
        kfree(sample_flow);
}

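/* Allocate the per-eswitch tc psample context and create the shared sampler
 * termination table. Fails if post_act is not available.
 */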
struct mlx5e_tc_psample *
mlx5e_tc_sample_init(struct mlx5_eswitch *esw, struct mlx5e_post_act *post_act)
{
        struct mlx5e_tc_psample *tc_psample;
        int err;

        tc_psample = kzalloc(sizeof(*tc_psample), GFP_KERNEL);
        if (!tc_psample)
                return ERR_PTR(-ENOMEM);
        if (IS_ERR_OR_NULL(post_act)) {
                err = PTR_ERR(post_act);
                goto err_post_act;
        }
        tc_psample->post_act = post_act;
        tc_psample->esw = esw;
        err = sampler_termtbl_create(tc_psample);
        if (err)
                goto err_post_act;

        mutex_init(&tc_psample->ht_lock);
        mutex_init(&tc_psample->restore_lock);

        return tc_psample;

err_post_act:
        kfree(tc_psample);
        return ERR_PTR(err);
}

void
mlx5e_tc_sample_cleanup(struct mlx5e_tc_psample *tc_psample)
{
        if (IS_ERR_OR_NULL(tc_psample))
                return;

        mutex_destroy(&tc_psample->restore_lock);
        mutex_destroy(&tc_psample->ht_lock);
        sampler_termtbl_destroy(tc_psample);
        kfree(tc_psample);
}