// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */

#include <linux/netdevice.h>
#include "en.h"
#include "en/fs.h"
#include "eswitch.h"
#include "ipsec.h"
#include "fs_core.h"
#include "lib/ipsec_fs_roce.h"
#include "lib/fs_chains.h"
#include "esw/ipsec_fs.h"
#include "en_rep.h"

#define NUM_IPSEC_FTE BIT(15)
#define MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_SIZE 16
#define IPSEC_TUNNEL_DEFAULT_TTL 0x40

struct mlx5e_ipsec_fc {
	struct mlx5_fc *cnt;
	struct mlx5_fc *drop;
};

struct mlx5e_ipsec_tx {
	struct mlx5e_ipsec_ft ft;
	struct mlx5e_ipsec_miss pol;
	struct mlx5e_ipsec_miss sa;
	struct mlx5e_ipsec_rule status;
	struct mlx5_flow_namespace *ns;
	struct mlx5e_ipsec_fc *fc;
	struct mlx5_fs_chains *chains;
	u8 allow_tunnel_mode : 1;
};

struct mlx5e_ipsec_status_checks {
	struct mlx5_flow_group *drop_all_group;
	struct mlx5e_ipsec_drop all;
};

struct mlx5e_ipsec_rx {
	struct mlx5e_ipsec_ft ft;
	struct mlx5e_ipsec_miss pol;
	struct mlx5e_ipsec_miss sa;
	struct mlx5e_ipsec_rule status;
	struct mlx5e_ipsec_status_checks status_drops;
	struct mlx5e_ipsec_fc *fc;
	struct mlx5_fs_chains *chains;
	u8 allow_tunnel_mode : 1;
};

/* IPsec RX flow steering */
static enum mlx5_traffic_types family2tt(u32 family)
{
	if (family == AF_INET)
		return MLX5_TT_IPV4_IPSEC_ESP;
	return MLX5_TT_IPV6_IPSEC_ESP;
}

static struct mlx5e_ipsec_rx *ipsec_rx(struct mlx5e_ipsec *ipsec, u32 family, int type)
{
	if (ipsec->is_uplink_rep && type == XFRM_DEV_OFFLOAD_PACKET)
		return ipsec->rx_esw;

	if (family == AF_INET)
		return ipsec->rx_ipv4;

	return ipsec->rx_ipv6;
}

static struct mlx5e_ipsec_tx *ipsec_tx(struct mlx5e_ipsec *ipsec, int type)
{
	if (ipsec->is_uplink_rep && type == XFRM_DEV_OFFLOAD_PACKET)
		return ipsec->tx_esw;

	return ipsec->tx;
}

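/* Wrap the SA table behind an fs_chains instance so that user policy
 * priorities can map onto chain prios (MLX5_IPSEC_CAP_PRIO). The table
 * taken at chain 0, prio 1, level 0 acts as the connector that links the
 * chains object into the surrounding fs_core hierarchy; it is returned
 * through @root_ft.
 */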
static struct mlx5_fs_chains *
ipsec_chains_create(struct mlx5_core_dev *mdev, struct mlx5_flow_table *miss_ft,
		    enum mlx5_flow_namespace_type ns, int base_prio,
		    int base_level, struct mlx5_flow_table **root_ft)
{
	struct mlx5_chains_attr attr = {};
	struct mlx5_fs_chains *chains;
	struct mlx5_flow_table *ft;
	int err;

	attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED |
		     MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
	attr.max_grp_num = 2;
	attr.default_ft = miss_ft;
	attr.ns = ns;
	attr.fs_base_prio = base_prio;
	attr.fs_base_level = base_level;
	chains = mlx5_chains_create(mdev, &attr);
	if (IS_ERR(chains))
		return chains;

	/* Create chain 0, prio 1, level 0 to connect chains to prev in fs_core */
	ft = mlx5_chains_get_table(chains, 0, 1, 0);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_chains_get;
	}

	*root_ft = ft;
	return chains;

err_chains_get:
	mlx5_chains_destroy(chains);
	return ERR_PTR(err);
}

static void ipsec_chains_destroy(struct mlx5_fs_chains *chains)
{
	mlx5_chains_put_table(chains, 0, 1, 0);
	mlx5_chains_destroy(chains);
}

static struct mlx5_flow_table *
ipsec_chains_get_table(struct mlx5_fs_chains *chains, u32 prio)
{
	return mlx5_chains_get_table(chains, 0, prio + 1, 0);
}

static void ipsec_chains_put_table(struct mlx5_fs_chains *chains, u32 prio)
{
	mlx5_chains_put_table(chains, 0, prio + 1, 0);
}

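/* All IPsec tables are auto-grouped tables with one reserved entry; the
 * reserved slot at index max_fte - 1 is used for the single-FTE miss/drop
 * groups created by ipsec_miss_create() and
 * ipsec_rx_status_drop_all_create() below.
 */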
static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns,
					       int level, int prio,
					       int max_num_groups, u32 flags)
{
	struct mlx5_flow_table_attr ft_attr = {};

	ft_attr.autogroup.num_reserved_entries = 1;
	ft_attr.autogroup.max_num_groups = max_num_groups;
	ft_attr.max_fte = NUM_IPSEC_FTE;
	ft_attr.level = level;
	ft_attr.prio = prio;
	ft_attr.flags = flags;

	return mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
}

static void ipsec_rx_status_drop_destroy(struct mlx5e_ipsec *ipsec,
					 struct mlx5e_ipsec_rx *rx)
{
	mlx5_del_flow_rules(rx->status_drops.all.rule);
	mlx5_fc_destroy(ipsec->mdev, rx->status_drops.all.fc);
	mlx5_destroy_flow_group(rx->status_drops.drop_all_group);
}

static void ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec,
					 struct mlx5e_ipsec_rx *rx)
{
	mlx5_del_flow_rules(rx->status.rule);

	if (rx != ipsec->rx_esw)
		return;

#ifdef CONFIG_MLX5_ESWITCH
	mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch), 0, 1, 0);
#endif
}

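/* Per-SA drop rules in the RX status table: count and drop packets whose
 * ipsec_syndrome indicates a failure; going by the rule names, syndrome 1
 * appears to mean a failed authentication and syndrome 2 a bad trailer.
 * metadata_reg_c_2 carries the SA's object ID (with BIT(31) set by
 * setup_modify_header()), which scopes each rule to this SA.
 */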
static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry,
					 struct mlx5e_ipsec_rx *rx)
{
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5_flow_table *ft = rx->ft.status;
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_fc *flow_counter;
	struct mlx5_flow_spec *spec;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	flow_counter = mlx5_fc_create(mdev, true);
	if (IS_ERR(flow_counter)) {
		err = PTR_ERR(flow_counter);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
		goto err_cnt;
	}
	sa_entry->ipsec_rule.auth.fc = flow_counter;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
	flow_act.flags = FLOW_ACT_NO_APPEND;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest.counter_id = mlx5_fc_id(flow_counter);
	if (rx == ipsec->rx_esw)
		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.ipsec_syndrome);
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 1);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2);
	MLX5_SET(fte_match_param, spec->match_value,
		 misc_parameters_2.metadata_reg_c_2,
		 sa_entry->ipsec_obj_id | BIT(31));
	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop rule, err=%d\n", err);
		goto err_rule;
	}
	sa_entry->ipsec_rule.auth.rule = rule;

	flow_counter = mlx5_fc_create(mdev, true);
	if (IS_ERR(flow_counter)) {
		err = PTR_ERR(flow_counter);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
		goto err_cnt_2;
	}
	sa_entry->ipsec_rule.trailer.fc = flow_counter;

	dest.counter_id = mlx5_fc_id(flow_counter);
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 2);
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop rule, err=%d\n", err);
		goto err_rule_2;
	}
	sa_entry->ipsec_rule.trailer.rule = rule;

	kvfree(spec);
	return 0;

err_rule_2:
	mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.trailer.fc);
err_cnt_2:
	mlx5_del_flow_rules(sa_entry->ipsec_rule.auth.rule);
err_rule:
	mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.auth.fc);
err_cnt:
	kvfree(spec);
	return err;
}

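/* Count and drop packets that fail the hardware anti-replay check for
 * this SA: metadata_reg_c_4 == 1 appears to mark the replay failure,
 * while metadata_reg_c_2 again narrows the match to this SA's object ID.
 */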
static int rx_add_rule_drop_replay(struct mlx5e_ipsec_sa_entry *sa_entry, struct mlx5e_ipsec_rx *rx)
{
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5_flow_table *ft = rx->ft.status;
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_fc *flow_counter;
	struct mlx5_flow_spec *spec;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	flow_counter = mlx5_fc_create(mdev, true);
	if (IS_ERR(flow_counter)) {
		err = PTR_ERR(flow_counter);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
		goto err_cnt;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
	flow_act.flags = FLOW_ACT_NO_APPEND;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest.counter_id = mlx5_fc_id(flow_counter);
	if (rx == ipsec->rx_esw)
		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4);
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 1);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2);
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_2,
		 sa_entry->ipsec_obj_id | BIT(31));
	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop rule, err=%d\n", err);
		goto err_rule;
	}

	sa_entry->ipsec_rule.replay.rule = rule;
	sa_entry->ipsec_rule.replay.fc = flow_counter;

	kvfree(spec);
	return 0;

err_rule:
	mlx5_fc_destroy(mdev, flow_counter);
err_cnt:
	kvfree(spec);
	return err;
}

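/* Catch-all drop: a single-entry group on the reserved last FTE of the
 * status table counts and drops anything that matched neither the "pass"
 * rule nor one of the per-SA drop rules above.
 */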
static int ipsec_rx_status_drop_all_create(struct mlx5e_ipsec *ipsec,
					   struct mlx5e_ipsec_rx *rx)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table *ft = rx->ft.status;
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_fc *flow_counter;
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int err = 0;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!flow_group_in || !spec) {
		err = -ENOMEM;
		goto err_out;
	}

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
	g = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop flow group, err=%d\n", err);
		goto err_out;
	}

	flow_counter = mlx5_fc_create(mdev, false);
	if (IS_ERR(flow_counter)) {
		err = PTR_ERR(flow_counter);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
		goto err_cnt;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest.counter_id = mlx5_fc_id(flow_counter);
	if (rx == ipsec->rx_esw)
		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop rule, err=%d\n", err);
		goto err_rule;
	}

	rx->status_drops.drop_all_group = g;
	rx->status_drops.all.rule = rule;
	rx->status_drops.all.fc = flow_counter;

	kvfree(flow_group_in);
	kvfree(spec);
	return 0;

err_rule:
	mlx5_fc_destroy(mdev, flow_counter);
err_cnt:
	mlx5_destroy_flow_group(g);
err_out:
	kvfree(flow_group_in);
	kvfree(spec);
	return err;
}

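/* Forward packets that decrypted cleanly (ipsec_syndrome == 0 and
 * metadata_reg_c_4 == 0) to the next destination supplied by the caller;
 * everything else falls through to the drop rules above.
 */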
static int ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
				       struct mlx5e_ipsec_rx *rx,
				       struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 misc_parameters_2.ipsec_syndrome);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 misc_parameters_2.metadata_reg_c_4);
	MLX5_SET(fte_match_param, spec->match_value,
		 misc_parameters_2.ipsec_syndrome, 0);
	MLX5_SET(fte_match_param, spec->match_value,
		 misc_parameters_2.metadata_reg_c_4, 0);
	if (rx == ipsec->rx_esw)
		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	flow_act.flags = FLOW_ACT_NO_APPEND;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_COUNT;
	rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_warn(ipsec->mdev,
			       "Failed to add ipsec rx status pass rule, err=%d\n", err);
		goto err_rule;
	}

	rx->status.rule = rule;
	kvfree(spec);
	return 0;

err_rule:
	kvfree(spec);
	return err;
}

static void mlx5_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
					 struct mlx5e_ipsec_rx *rx)
{
	ipsec_rx_status_pass_destroy(ipsec, rx);
	ipsec_rx_status_drop_destroy(ipsec, rx);
}

static int mlx5_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
				       struct mlx5e_ipsec_rx *rx,
				       struct mlx5_flow_destination *dest)
{
	int err;

	err = ipsec_rx_status_drop_all_create(ipsec, rx);
	if (err)
		return err;

	err = ipsec_rx_status_pass_create(ipsec, rx, dest);
	if (err)
		goto err_pass_create;

	return 0;

err_pass_create:
	ipsec_rx_status_drop_destroy(ipsec, rx);
	return err;
}

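/* Install a default rule on the reserved last FTE of @ft that forwards
 * anything not matched by the regular groups to @dest. Used as the miss
 * path for both the policy and SA tables.
 */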
static int ipsec_miss_create(struct mlx5_core_dev *mdev,
			     struct mlx5_flow_table *ft,
			     struct mlx5e_ipsec_miss *miss,
			     struct mlx5_flow_destination *dest)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_spec *spec;
	u32 *flow_group_in;
	int err = 0;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!flow_group_in || !spec) {
		err = -ENOMEM;
		goto out;
	}

	/* Create miss_group */
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
	miss->group = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(miss->group)) {
		err = PTR_ERR(miss->group);
		mlx5_core_err(mdev, "fail to create IPsec miss_group err=%d\n",
			      err);
		goto out;
	}

	/* Create miss rule */
	miss->rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
	if (IS_ERR(miss->rule)) {
		mlx5_destroy_flow_group(miss->group);
		err = PTR_ERR(miss->rule);
		mlx5_core_err(mdev, "fail to create IPsec miss_rule err=%d\n",
			      err);
		goto out;
	}
out:
	kvfree(flow_group_in);
	kvfree(spec);
	return err;
}

static void handle_ipsec_rx_bringup(struct mlx5e_ipsec *ipsec, u32 family)
{
	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, XFRM_DEV_OFFLOAD_PACKET);
	struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(ipsec->fs, false);
	struct mlx5_flow_destination old_dest, new_dest;

	old_dest = mlx5_ttc_get_default_dest(mlx5e_fs_get_ttc(ipsec->fs, false),
					     family2tt(family));

	mlx5_ipsec_fs_roce_rx_create(ipsec->mdev, ipsec->roce, ns, &old_dest, family,
				     MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL, MLX5E_NIC_PRIO);

	new_dest.ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, family);
	new_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	mlx5_modify_rule_destination(rx->status.rule, &new_dest, &old_dest);
	mlx5_modify_rule_destination(rx->sa.rule, &new_dest, &old_dest);
}

static void handle_ipsec_rx_cleanup(struct mlx5e_ipsec *ipsec, u32 family)
{
	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, XFRM_DEV_OFFLOAD_PACKET);
	struct mlx5_flow_destination old_dest, new_dest;

	old_dest.ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, family);
	old_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	new_dest = mlx5_ttc_get_default_dest(mlx5e_fs_get_ttc(ipsec->fs, false),
					     family2tt(family));
	mlx5_modify_rule_destination(rx->sa.rule, &new_dest, &old_dest);
	mlx5_modify_rule_destination(rx->status.rule, &new_dest, &old_dest);

	mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, ipsec->mdev);
}

static void ipsec_mpv_work_handler(struct work_struct *_work)
{
	struct mlx5e_ipsec_mpv_work *work = container_of(_work, struct mlx5e_ipsec_mpv_work, work);
	struct mlx5e_ipsec *ipsec = work->slave_priv->ipsec;

	switch (work->event) {
	case MPV_DEVCOM_IPSEC_MASTER_UP:
		mutex_lock(&ipsec->tx->ft.mutex);
		if (ipsec->tx->ft.refcnt)
			mlx5_ipsec_fs_roce_tx_create(ipsec->mdev, ipsec->roce, ipsec->tx->ft.pol,
						     true);
		mutex_unlock(&ipsec->tx->ft.mutex);

		mutex_lock(&ipsec->rx_ipv4->ft.mutex);
		if (ipsec->rx_ipv4->ft.refcnt)
			handle_ipsec_rx_bringup(ipsec, AF_INET);
		mutex_unlock(&ipsec->rx_ipv4->ft.mutex);

		mutex_lock(&ipsec->rx_ipv6->ft.mutex);
		if (ipsec->rx_ipv6->ft.refcnt)
			handle_ipsec_rx_bringup(ipsec, AF_INET6);
		mutex_unlock(&ipsec->rx_ipv6->ft.mutex);
		break;
	case MPV_DEVCOM_IPSEC_MASTER_DOWN:
		mutex_lock(&ipsec->tx->ft.mutex);
		if (ipsec->tx->ft.refcnt)
			mlx5_ipsec_fs_roce_tx_destroy(ipsec->roce, ipsec->mdev);
		mutex_unlock(&ipsec->tx->ft.mutex);

		mutex_lock(&ipsec->rx_ipv4->ft.mutex);
		if (ipsec->rx_ipv4->ft.refcnt)
			handle_ipsec_rx_cleanup(ipsec, AF_INET);
		mutex_unlock(&ipsec->rx_ipv4->ft.mutex);

		mutex_lock(&ipsec->rx_ipv6->ft.mutex);
		if (ipsec->rx_ipv6->ft.refcnt)
			handle_ipsec_rx_cleanup(ipsec, AF_INET6);
		mutex_unlock(&ipsec->rx_ipv6->ft.mutex);
		break;
	}

	complete(&work->master_priv->ipsec->comp);
}

static void ipsec_rx_ft_disconnect(struct mlx5e_ipsec *ipsec, u32 family)
{
	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);

	mlx5_ttc_fwd_default_dest(ttc, family2tt(family));
}

static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
		       struct mlx5e_ipsec_rx *rx, u32 family)
{
	/* disconnect */
	if (rx != ipsec->rx_esw)
		ipsec_rx_ft_disconnect(ipsec, family);

	if (rx->chains) {
		ipsec_chains_destroy(rx->chains);
	} else {
		mlx5_del_flow_rules(rx->pol.rule);
		mlx5_destroy_flow_group(rx->pol.group);
		mlx5_destroy_flow_table(rx->ft.pol);
	}

	mlx5_del_flow_rules(rx->sa.rule);
	mlx5_destroy_flow_group(rx->sa.group);
	mlx5_destroy_flow_table(rx->ft.sa);
	if (rx->allow_tunnel_mode)
		mlx5_eswitch_unblock_encap(mdev);
	mlx5_ipsec_rx_status_destroy(ipsec, rx);
	mlx5_destroy_flow_table(rx->ft.status);

	mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, mdev);
}

static void ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
				     struct mlx5e_ipsec_rx *rx,
				     u32 family,
				     struct mlx5e_ipsec_rx_create_attr *attr)
{
	if (rx == ipsec->rx_esw) {
		/* For packet offload in switchdev mode, RX & TX use FDB namespace */
		attr->ns = ipsec->tx_esw->ns;
		mlx5_esw_ipsec_rx_create_attr_set(ipsec, attr);
		return;
	}

	attr->ns = mlx5e_fs_get_ns(ipsec->fs, false);
	attr->ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
	attr->family = family;
	attr->prio = MLX5E_NIC_PRIO;
	attr->pol_level = MLX5E_ACCEL_FS_POL_FT_LEVEL;
	attr->sa_level = MLX5E_ACCEL_FS_ESP_FT_LEVEL;
	attr->status_level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL;
	attr->chains_ns = MLX5_FLOW_NAMESPACE_KERNEL;
}

static int ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
					 struct mlx5e_ipsec_rx *rx,
					 struct mlx5e_ipsec_rx_create_attr *attr,
					 struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_table *ft;
	int err;

	if (rx == ipsec->rx_esw)
		return mlx5_esw_ipsec_rx_status_pass_dest_get(ipsec, dest);

	*dest = mlx5_ttc_get_default_dest(attr->ttc, family2tt(attr->family));
	err = mlx5_ipsec_fs_roce_rx_create(ipsec->mdev, ipsec->roce, attr->ns, dest,
					   attr->family, MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL,
					   attr->prio);
	if (err)
		return err;

	ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, attr->family);
	if (ft) {
		dest->type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest->ft = ft;
	}

	return 0;
}

static void ipsec_rx_ft_connect(struct mlx5e_ipsec *ipsec,
				struct mlx5e_ipsec_rx *rx,
				struct mlx5e_ipsec_rx_create_attr *attr)
{
	struct mlx5_flow_destination dest = {};

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = rx->ft.pol;
	mlx5_ttc_fwd_dest(attr->ttc, family2tt(attr->family), &dest);
}

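/* Build the RX datapath for one address family:
 * TTC steering -> policy table (or prio chains) -> SA table -> status
 * table, with the status "pass" rule forwarding to the default TTC
 * destination (or the RoCE tables when RoCE IPsec is active). For ESW
 * offload the tables live in the FDB namespace and are not connected to
 * TTC here.
 */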
static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
		     struct mlx5e_ipsec_rx *rx, u32 family)
{
	struct mlx5e_ipsec_rx_create_attr attr;
	struct mlx5_flow_destination dest[2];
	struct mlx5_flow_table *ft;
	u32 flags = 0;
	int err;

	ipsec_rx_create_attr_set(ipsec, rx, family, &attr);

	err = ipsec_rx_status_pass_dest_get(ipsec, rx, &attr, &dest[0]);
	if (err)
		return err;

	ft = ipsec_ft_create(attr.ns, attr.status_level, attr.prio, 3, 0);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_fs_ft_status;
	}
	rx->ft.status = ft;

	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest[1].counter_id = mlx5_fc_id(rx->fc->cnt);
	err = mlx5_ipsec_rx_status_create(ipsec, rx, dest);
	if (err)
		goto err_add;

	/* Create FT */
	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
		rx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
	if (rx->allow_tunnel_mode)
		flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
	ft = ipsec_ft_create(attr.ns, attr.sa_level, attr.prio, 2, flags);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_fs_ft;
	}
	rx->ft.sa = ft;

	err = ipsec_miss_create(mdev, rx->ft.sa, &rx->sa, dest);
	if (err)
		goto err_fs;

	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
		rx->chains = ipsec_chains_create(mdev, rx->ft.sa,
						 attr.chains_ns,
						 attr.prio,
						 attr.pol_level,
						 &rx->ft.pol);
		if (IS_ERR(rx->chains)) {
			err = PTR_ERR(rx->chains);
			goto err_pol_ft;
		}

		goto connect;
	}

	ft = ipsec_ft_create(attr.ns, attr.pol_level, attr.prio, 2, 0);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_pol_ft;
	}
	rx->ft.pol = ft;
	memset(dest, 0x00, 2 * sizeof(*dest));
	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[0].ft = rx->ft.sa;
	err = ipsec_miss_create(mdev, rx->ft.pol, &rx->pol, dest);
	if (err)
		goto err_pol_miss;

connect:
	/* connect */
	if (rx != ipsec->rx_esw)
		ipsec_rx_ft_connect(ipsec, rx, &attr);
	return 0;

err_pol_miss:
	mlx5_destroy_flow_table(rx->ft.pol);
err_pol_ft:
	mlx5_del_flow_rules(rx->sa.rule);
	mlx5_destroy_flow_group(rx->sa.group);
err_fs:
	mlx5_destroy_flow_table(rx->ft.sa);
err_fs_ft:
	if (rx->allow_tunnel_mode)
		mlx5_eswitch_unblock_encap(mdev);
	mlx5_ipsec_rx_status_destroy(ipsec, rx);
err_add:
	mlx5_destroy_flow_table(rx->ft.status);
err_fs_ft_status:
	mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, mdev);
	return err;
}

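/* rx_get()/rx_put() reference-count the per-family RX tables; callers
 * must hold rx->ft.mutex. Eswitch mode changes are blocked for the
 * lifetime of the tables via mlx5_eswitch_block_mode().
 */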
static int rx_get(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
		  struct mlx5e_ipsec_rx *rx, u32 family)
{
	int err;

	if (rx->ft.refcnt)
		goto skip;

	err = mlx5_eswitch_block_mode(mdev);
	if (err)
		return err;

	err = rx_create(mdev, ipsec, rx, family);
	if (err) {
		mlx5_eswitch_unblock_mode(mdev);
		return err;
	}

skip:
	rx->ft.refcnt++;
	return 0;
}

static void rx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_rx *rx,
		   u32 family)
{
	if (--rx->ft.refcnt)
		return;

	rx_destroy(ipsec->mdev, ipsec, rx, family);
	mlx5_eswitch_unblock_mode(ipsec->mdev);
}

static struct mlx5e_ipsec_rx *rx_ft_get(struct mlx5_core_dev *mdev,
					struct mlx5e_ipsec *ipsec, u32 family,
					int type)
{
	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);
	int err;

	mutex_lock(&rx->ft.mutex);
	err = rx_get(mdev, ipsec, rx, family);
	mutex_unlock(&rx->ft.mutex);
	if (err)
		return ERR_PTR(err);

	return rx;
}

static struct mlx5_flow_table *rx_ft_get_policy(struct mlx5_core_dev *mdev,
						struct mlx5e_ipsec *ipsec,
						u32 family, u32 prio, int type)
{
	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);
	struct mlx5_flow_table *ft;
	int err;

	mutex_lock(&rx->ft.mutex);
	err = rx_get(mdev, ipsec, rx, family);
	if (err)
		goto err_get;

	ft = rx->chains ? ipsec_chains_get_table(rx->chains, prio) : rx->ft.pol;
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_get_ft;
	}

	mutex_unlock(&rx->ft.mutex);
	return ft;

err_get_ft:
	rx_put(ipsec, rx, family);
err_get:
	mutex_unlock(&rx->ft.mutex);
	return ERR_PTR(err);
}

static void rx_ft_put(struct mlx5e_ipsec *ipsec, u32 family, int type)
{
	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);

	mutex_lock(&rx->ft.mutex);
	rx_put(ipsec, rx, family);
	mutex_unlock(&rx->ft.mutex);
}

static void rx_ft_put_policy(struct mlx5e_ipsec *ipsec, u32 family, u32 prio, int type)
{
	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);

	mutex_lock(&rx->ft.mutex);
	if (rx->chains)
		ipsec_chains_put_table(rx->chains, prio);

	rx_put(ipsec, rx, family);
	mutex_unlock(&rx->ft.mutex);
}

static int ipsec_counter_rule_tx(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *fte;
	struct mlx5_flow_spec *spec;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	/* create fte */
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW |
			  MLX5_FLOW_CONTEXT_ACTION_COUNT;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest.counter_id = mlx5_fc_id(tx->fc->cnt);
	fte = mlx5_add_flow_rules(tx->ft.status, spec, &flow_act, &dest, 1);
	if (IS_ERR(fte)) {
		err = PTR_ERR(fte);
		mlx5_core_err(mdev, "Fail to add ipsec tx counter rule err=%d\n", err);
		goto err_rule;
	}

	kvfree(spec);
	tx->status.rule = fte;
	return 0;

err_rule:
	kvfree(spec);
	return err;
}

/* IPsec TX flow steering */
static void tx_destroy(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
		       struct mlx5_ipsec_fs *roce)
{
	mlx5_ipsec_fs_roce_tx_destroy(roce, ipsec->mdev);
	if (tx->chains) {
		ipsec_chains_destroy(tx->chains);
	} else {
		mlx5_del_flow_rules(tx->pol.rule);
		mlx5_destroy_flow_group(tx->pol.group);
		mlx5_destroy_flow_table(tx->ft.pol);
	}

	if (tx == ipsec->tx_esw) {
		mlx5_del_flow_rules(tx->sa.rule);
		mlx5_destroy_flow_group(tx->sa.group);
	}
	mlx5_destroy_flow_table(tx->ft.sa);
	if (tx->allow_tunnel_mode)
		mlx5_eswitch_unblock_encap(ipsec->mdev);
	mlx5_del_flow_rules(tx->status.rule);
	mlx5_destroy_flow_table(tx->ft.status);
}

static void ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
				     struct mlx5e_ipsec_tx *tx,
				     struct mlx5e_ipsec_tx_create_attr *attr)
{
	if (tx == ipsec->tx_esw) {
		mlx5_esw_ipsec_tx_create_attr_set(ipsec, attr);
		return;
	}

	attr->prio = 0;
	attr->pol_level = 0;
	attr->sa_level = 1;
	attr->cnt_level = 2;
	attr->chains_ns = MLX5_FLOW_NAMESPACE_EGRESS_IPSEC;
}

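/* Build the TX datapath: policy table (or prio chains) -> SA table ->
 * status table holding the allow+count rule. In switchdev mode the SA
 * table additionally gets a miss rule steering unmatched traffic to the
 * uplink vport, and RoCE TX tables are connected after the policy table.
 */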
static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
		     struct mlx5_ipsec_fs *roce)
{
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5e_ipsec_tx_create_attr attr;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_table *ft;
	u32 flags = 0;
	int err;

	ipsec_tx_create_attr_set(ipsec, tx, &attr);
	ft = ipsec_ft_create(tx->ns, attr.cnt_level, attr.prio, 1, 0);
	if (IS_ERR(ft))
		return PTR_ERR(ft);
	tx->ft.status = ft;

	err = ipsec_counter_rule_tx(mdev, tx);
	if (err)
		goto err_status_rule;

	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
		tx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
	if (tx->allow_tunnel_mode)
		flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
	ft = ipsec_ft_create(tx->ns, attr.sa_level, attr.prio, 4, flags);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_sa_ft;
	}
	tx->ft.sa = ft;

	if (tx == ipsec->tx_esw) {
		dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest.vport.num = MLX5_VPORT_UPLINK;
		err = ipsec_miss_create(mdev, tx->ft.sa, &tx->sa, &dest);
		if (err)
			goto err_sa_miss;
		memset(&dest, 0, sizeof(dest));
	}

	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
		tx->chains = ipsec_chains_create(
			mdev, tx->ft.sa, attr.chains_ns, attr.prio, attr.pol_level,
			&tx->ft.pol);
		if (IS_ERR(tx->chains)) {
			err = PTR_ERR(tx->chains);
			goto err_pol_ft;
		}

		goto connect_roce;
	}

	ft = ipsec_ft_create(tx->ns, attr.pol_level, attr.prio, 2, 0);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_pol_ft;
	}
	tx->ft.pol = ft;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = tx->ft.sa;
	err = ipsec_miss_create(mdev, tx->ft.pol, &tx->pol, &dest);
	if (err) {
		mlx5_destroy_flow_table(tx->ft.pol);
		goto err_pol_ft;
	}

connect_roce:
	err = mlx5_ipsec_fs_roce_tx_create(mdev, roce, tx->ft.pol, false);
	if (err)
		goto err_roce;
	return 0;

err_roce:
	if (tx->chains) {
		ipsec_chains_destroy(tx->chains);
	} else {
		mlx5_del_flow_rules(tx->pol.rule);
		mlx5_destroy_flow_group(tx->pol.group);
		mlx5_destroy_flow_table(tx->ft.pol);
	}
err_pol_ft:
	if (tx == ipsec->tx_esw) {
		mlx5_del_flow_rules(tx->sa.rule);
		mlx5_destroy_flow_group(tx->sa.group);
	}
err_sa_miss:
	mlx5_destroy_flow_table(tx->ft.sa);
err_sa_ft:
	if (tx->allow_tunnel_mode)
		mlx5_eswitch_unblock_encap(mdev);
	mlx5_del_flow_rules(tx->status.rule);
err_status_rule:
	mlx5_destroy_flow_table(tx->ft.status);
	return err;
}

static void ipsec_esw_tx_ft_policy_set(struct mlx5_core_dev *mdev,
				       struct mlx5_flow_table *ft)
{
#ifdef CONFIG_MLX5_ESWITCH
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct mlx5e_priv *priv;

	esw->offloads.ft_ipsec_tx_pol = ft;
	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	priv = netdev_priv(uplink_rpriv->netdev);
	if (!priv->channels.num)
		return;

	mlx5e_rep_deactivate_channels(priv);
	mlx5e_rep_activate_channels(priv);
#endif
}

static int tx_get(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
		  struct mlx5e_ipsec_tx *tx)
{
	int err;

	if (tx->ft.refcnt)
		goto skip;

	err = mlx5_eswitch_block_mode(mdev);
	if (err)
		return err;

	err = tx_create(ipsec, tx, ipsec->roce);
	if (err) {
		mlx5_eswitch_unblock_mode(mdev);
		return err;
	}

	if (tx == ipsec->tx_esw)
		ipsec_esw_tx_ft_policy_set(mdev, tx->ft.pol);

skip:
	tx->ft.refcnt++;
	return 0;
}

static void tx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx)
{
	if (--tx->ft.refcnt)
		return;

	if (tx == ipsec->tx_esw) {
		mlx5_esw_ipsec_restore_dest_uplink(ipsec->mdev);
		ipsec_esw_tx_ft_policy_set(ipsec->mdev, NULL);
	}

	tx_destroy(ipsec, tx, ipsec->roce);
	mlx5_eswitch_unblock_mode(ipsec->mdev);
}

static struct mlx5_flow_table *tx_ft_get_policy(struct mlx5_core_dev *mdev,
						struct mlx5e_ipsec *ipsec,
						u32 prio, int type)
{
	struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);
	struct mlx5_flow_table *ft;
	int err;

	mutex_lock(&tx->ft.mutex);
	err = tx_get(mdev, ipsec, tx);
	if (err)
		goto err_get;

	ft = tx->chains ? ipsec_chains_get_table(tx->chains, prio) : tx->ft.pol;
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_get_ft;
	}

	mutex_unlock(&tx->ft.mutex);
	return ft;

err_get_ft:
	tx_put(ipsec, tx);
err_get:
	mutex_unlock(&tx->ft.mutex);
	return ERR_PTR(err);
}

static struct mlx5e_ipsec_tx *tx_ft_get(struct mlx5_core_dev *mdev,
					struct mlx5e_ipsec *ipsec, int type)
{
	struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);
	int err;

	mutex_lock(&tx->ft.mutex);
	err = tx_get(mdev, ipsec, tx);
	mutex_unlock(&tx->ft.mutex);
	if (err)
		return ERR_PTR(err);

	return tx;
}

static void tx_ft_put(struct mlx5e_ipsec *ipsec, int type)
{
	struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);

	mutex_lock(&tx->ft.mutex);
	tx_put(ipsec, tx);
	mutex_unlock(&tx->ft.mutex);
}

static void tx_ft_put_policy(struct mlx5e_ipsec *ipsec, u32 prio, int type)
{
	struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);

	mutex_lock(&tx->ft.mutex);
	if (tx->chains)
		ipsec_chains_put_table(tx->chains, prio);

	tx_put(ipsec, tx);
	mutex_unlock(&tx->ft.mutex);
}

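/* The setup_fte_*() helpers below populate match criteria and values on
 * a flow spec; each one enables the match_criteria_enable bit for the
 * field group it touches.
 */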
static void setup_fte_addr4(struct mlx5_flow_spec *spec, __be32 *saddr,
			    __be32 *daddr)
{
	if (!*saddr && !*daddr)
		return;

	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 4);

	if (*saddr) {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), saddr, 4);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
	}

	if (*daddr) {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), daddr, 4);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	}
}

static void setup_fte_addr6(struct mlx5_flow_spec *spec, __be32 *saddr,
			    __be32 *daddr)
{
	if (addr6_all_zero(saddr) && addr6_all_zero(daddr))
		return;

	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 6);

	if (!addr6_all_zero(saddr)) {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), saddr, 16);
		memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), 0xff, 16);
	}

	if (!addr6_all_zero(daddr)) {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), daddr, 16);
		memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 0xff, 16);
	}
}

static void setup_fte_esp(struct mlx5_flow_spec *spec)
{
	/* ESP header; ip_protocol is an outer header field, so outer header
	 * matching must be enabled here, not misc parameters.
	 */
	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_ESP);
}

static void setup_fte_spi(struct mlx5_flow_spec *spec, u32 spi, bool encap)
{
	/* SPI number */
	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;

	if (encap) {
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 misc_parameters.inner_esp_spi);
		MLX5_SET(fte_match_param, spec->match_value,
			 misc_parameters.inner_esp_spi, spi);
	} else {
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 misc_parameters.outer_esp_spi);
		MLX5_SET(fte_match_param, spec->match_value,
			 misc_parameters.outer_esp_spi, spi);
	}
}

static void setup_fte_no_frags(struct mlx5_flow_spec *spec)
{
	/* Non fragmented */
	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.frag);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.frag, 0);
}

static void setup_fte_reg_a(struct mlx5_flow_spec *spec)
{
	/* Add IPsec indicator in metadata_reg_a */
	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;

	MLX5_SET(fte_match_param, spec->match_criteria,
		 misc_parameters_2.metadata_reg_a, MLX5_ETH_WQE_FT_META_IPSEC);
	MLX5_SET(fte_match_param, spec->match_value,
		 misc_parameters_2.metadata_reg_a, MLX5_ETH_WQE_FT_META_IPSEC);
}

static void setup_fte_reg_c4(struct mlx5_flow_spec *spec, u32 reqid)
{
	/* Pass policy check before choosing this SA */
	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 misc_parameters_2.metadata_reg_c_4);
	MLX5_SET(fte_match_param, spec->match_value,
		 misc_parameters_2.metadata_reg_c_4, reqid);
}

static void setup_fte_upper_proto_match(struct mlx5_flow_spec *spec, struct upspec *upspec)
{
	switch (upspec->proto) {
	case IPPROTO_UDP:
		if (upspec->dport) {
			MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
				 udp_dport, upspec->dport_mask);
			MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
				 udp_dport, upspec->dport);
		}
		if (upspec->sport) {
			MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
				 udp_sport, upspec->sport_mask);
			MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
				 udp_sport, upspec->sport);
		}
		break;
	case IPPROTO_TCP:
		if (upspec->dport) {
			MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
				 tcp_dport, upspec->dport_mask);
			MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
				 tcp_dport, upspec->dport);
		}
		if (upspec->sport) {
			MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
				 tcp_sport, upspec->sport_mask);
			MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
				 tcp_sport, upspec->sport);
		}
		break;
	default:
		return;
	}

	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, spec->match_criteria, ip_protocol);
	MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, ip_protocol, upspec->proto);
}

static enum mlx5_flow_namespace_type ipsec_fs_get_ns(struct mlx5e_ipsec *ipsec,
						     int type, u8 dir)
{
	if (ipsec->is_uplink_rep && type == XFRM_DEV_OFFLOAD_PACKET)
		return MLX5_FLOW_NAMESPACE_FDB;

	if (dir == XFRM_DEV_OFFLOAD_IN)
		return MLX5_FLOW_NAMESPACE_KERNEL;

	return MLX5_FLOW_NAMESPACE_EGRESS;
}

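/* Attach a modify-header action that tags packets with @val: on RX the
 * value is written to metadata_reg_b and metadata_reg_c_2 (plus a clear
 * of reg_c_4 for crypto mode), on TX it is written to metadata_reg_c_4
 * so the SA rule can verify that the policy check passed.
 */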
static int setup_modify_header(struct mlx5e_ipsec *ipsec, int type, u32 val, u8 dir,
			       struct mlx5_flow_act *flow_act)
{
	enum mlx5_flow_namespace_type ns_type = ipsec_fs_get_ns(ipsec, type, dir);
	u8 action[3][MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5_modify_hdr *modify_hdr;
	u8 num_of_actions = 1;

	MLX5_SET(set_action_in, action[0], action_type, MLX5_ACTION_TYPE_SET);
	switch (dir) {
	case XFRM_DEV_OFFLOAD_IN:
		MLX5_SET(set_action_in, action[0], field,
			 MLX5_ACTION_IN_FIELD_METADATA_REG_B);

		num_of_actions++;
		MLX5_SET(set_action_in, action[1], action_type, MLX5_ACTION_TYPE_SET);
		MLX5_SET(set_action_in, action[1], field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_2);
		MLX5_SET(set_action_in, action[1], data, val);
		MLX5_SET(set_action_in, action[1], offset, 0);
		MLX5_SET(set_action_in, action[1], length, 32);

		if (type == XFRM_DEV_OFFLOAD_CRYPTO) {
			num_of_actions++;
			MLX5_SET(set_action_in, action[2], action_type,
				 MLX5_ACTION_TYPE_SET);
			MLX5_SET(set_action_in, action[2], field,
				 MLX5_ACTION_IN_FIELD_METADATA_REG_C_4);
			MLX5_SET(set_action_in, action[2], data, 0);
			MLX5_SET(set_action_in, action[2], offset, 0);
			MLX5_SET(set_action_in, action[2], length, 32);
		}
		break;
	case XFRM_DEV_OFFLOAD_OUT:
		MLX5_SET(set_action_in, action[0], field,
			 MLX5_ACTION_IN_FIELD_METADATA_REG_C_4);
		break;
	default:
		return -EINVAL;
	}

	MLX5_SET(set_action_in, action[0], data, val);
	MLX5_SET(set_action_in, action[0], offset, 0);
	MLX5_SET(set_action_in, action[0], length, 32);

	modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, num_of_actions, action);
	if (IS_ERR(modify_hdr)) {
		mlx5_core_err(mdev, "Failed to allocate modify_header %ld\n",
			      PTR_ERR(modify_hdr));
		return PTR_ERR(modify_hdr);
	}

	flow_act->modify_hdr = modify_hdr;
	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	return 0;
}

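/* Tunnel mode reformat: for decap (RX) only the reformat type is needed,
 * while for encap (TX) this builds the header blob that hardware
 * prepends: MAC header, outer IPv4/IPv6 header and the ESP header (plus
 * 8 extra bytes, presumably room for the IV), with addresses, TTL and
 * SPI taken from the SA attributes.
 */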
static int
setup_pkt_tunnel_reformat(struct mlx5_core_dev *mdev,
			  struct mlx5_accel_esp_xfrm_attrs *attrs,
			  struct mlx5_pkt_reformat_params *reformat_params)
{
	struct ip_esp_hdr *esp_hdr;
	struct ipv6hdr *ipv6hdr;
	struct ethhdr *eth_hdr;
	struct iphdr *iphdr;
	char *reformatbf;
	size_t bfflen;
	void *hdr;

	bfflen = sizeof(*eth_hdr);

	if (attrs->dir == XFRM_DEV_OFFLOAD_OUT) {
		bfflen += sizeof(*esp_hdr) + 8;

		switch (attrs->family) {
		case AF_INET:
			bfflen += sizeof(*iphdr);
			break;
		case AF_INET6:
			bfflen += sizeof(*ipv6hdr);
			break;
		default:
			return -EINVAL;
		}
	}

	reformatbf = kzalloc(bfflen, GFP_KERNEL);
	if (!reformatbf)
		return -ENOMEM;

	eth_hdr = (struct ethhdr *)reformatbf;
	switch (attrs->family) {
	case AF_INET:
		eth_hdr->h_proto = htons(ETH_P_IP);
		break;
	case AF_INET6:
		eth_hdr->h_proto = htons(ETH_P_IPV6);
		break;
	default:
		goto free_reformatbf;
	}

	ether_addr_copy(eth_hdr->h_dest, attrs->dmac);
	ether_addr_copy(eth_hdr->h_source, attrs->smac);

	switch (attrs->dir) {
	case XFRM_DEV_OFFLOAD_IN:
		reformat_params->type = MLX5_REFORMAT_TYPE_L3_ESP_TUNNEL_TO_L2;
		break;
	case XFRM_DEV_OFFLOAD_OUT:
		reformat_params->type = MLX5_REFORMAT_TYPE_L2_TO_L3_ESP_TUNNEL;
		reformat_params->param_0 = attrs->authsize;

		hdr = reformatbf + sizeof(*eth_hdr);
		switch (attrs->family) {
		case AF_INET:
			iphdr = (struct iphdr *)hdr;
			memcpy(&iphdr->saddr, &attrs->saddr.a4, 4);
			memcpy(&iphdr->daddr, &attrs->daddr.a4, 4);
			iphdr->version = 4;
			iphdr->ihl = 5;
			iphdr->ttl = IPSEC_TUNNEL_DEFAULT_TTL;
			iphdr->protocol = IPPROTO_ESP;
			hdr += sizeof(*iphdr);
			break;
		case AF_INET6:
			ipv6hdr = (struct ipv6hdr *)hdr;
			memcpy(&ipv6hdr->saddr, &attrs->saddr.a6, 16);
			memcpy(&ipv6hdr->daddr, &attrs->daddr.a6, 16);
			ipv6hdr->nexthdr = IPPROTO_ESP;
			ipv6hdr->version = 6;
			ipv6hdr->hop_limit = IPSEC_TUNNEL_DEFAULT_TTL;
			hdr += sizeof(*ipv6hdr);
			break;
		default:
			goto free_reformatbf;
		}

		esp_hdr = (struct ip_esp_hdr *)hdr;
		esp_hdr->spi = htonl(attrs->spi);
		break;
	default:
		goto free_reformatbf;
	}

	reformat_params->size = bfflen;
	reformat_params->data = reformatbf;
	return 0;

free_reformatbf:
	kfree(reformatbf);
	return -EINVAL;
}

static int get_reformat_type(struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	switch (attrs->dir) {
	case XFRM_DEV_OFFLOAD_IN:
		if (attrs->encap)
			return MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT_OVER_UDP;
		return MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT;
	case XFRM_DEV_OFFLOAD_OUT:
		if (attrs->family == AF_INET) {
			if (attrs->encap)
				return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV4;
			return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4;
		}

		if (attrs->encap)
			return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV6;
		return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6;
	default:
		WARN_ON(true);
	}

	return -EINVAL;
}

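/* Transport mode reformat: decap needs only the reformat type, while
 * encap supplies the SPI (in network byte order) and, for UDP
 * encapsulation (NAT-T), a UDP header built from the SA's source and
 * destination ports.
 */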
static int
setup_pkt_transport_reformat(struct mlx5_accel_esp_xfrm_attrs *attrs,
			     struct mlx5_pkt_reformat_params *reformat_params)
{
	struct udphdr *udphdr;
	char *reformatbf;
	size_t bfflen;
	__be32 spi;
	void *hdr;

	reformat_params->type = get_reformat_type(attrs);
	if (reformat_params->type < 0)
		return reformat_params->type;

	switch (attrs->dir) {
	case XFRM_DEV_OFFLOAD_IN:
		break;
	case XFRM_DEV_OFFLOAD_OUT:
		bfflen = MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_SIZE;
		if (attrs->encap)
			bfflen += sizeof(*udphdr);

		reformatbf = kzalloc(bfflen, GFP_KERNEL);
		if (!reformatbf)
			return -ENOMEM;

		hdr = reformatbf;
		if (attrs->encap) {
			udphdr = (struct udphdr *)reformatbf;
			udphdr->source = attrs->sport;
			udphdr->dest = attrs->dport;
			hdr += sizeof(*udphdr);
		}

		/* convert to network format */
		spi = htonl(attrs->spi);
		memcpy(hdr, &spi, sizeof(spi));

		reformat_params->param_0 = attrs->authsize;
		reformat_params->size = bfflen;
		reformat_params->data = reformatbf;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int setup_pkt_reformat(struct mlx5e_ipsec *ipsec,
			      struct mlx5_accel_esp_xfrm_attrs *attrs,
			      struct mlx5_flow_act *flow_act)
{
	enum mlx5_flow_namespace_type ns_type = ipsec_fs_get_ns(ipsec, attrs->type,
								attrs->dir);
	struct mlx5_pkt_reformat_params reformat_params = {};
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5_pkt_reformat *pkt_reformat;
	int ret;

	switch (attrs->mode) {
	case XFRM_MODE_TRANSPORT:
		ret = setup_pkt_transport_reformat(attrs, &reformat_params);
		break;
	case XFRM_MODE_TUNNEL:
		ret = setup_pkt_tunnel_reformat(mdev, attrs, &reformat_params);
		break;
	default:
		ret = -EINVAL;
	}

	if (ret)
		return ret;

	pkt_reformat =
		mlx5_packet_reformat_alloc(mdev, &reformat_params, ns_type);
	kfree(reformat_params.data);
	if (IS_ERR(pkt_reformat))
		return PTR_ERR(pkt_reformat);

	flow_act->pkt_reformat = pkt_reformat;
	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
	return 0;
}

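/* Install the per-SA RX rule: match on IP addresses, SPI and (for non
 * encapsulated traffic) the ESP protocol, decrypt with the SA's IPsec
 * object, tag the packet via modify-header, then forward to the status
 * table where the syndrome checks decide pass or drop.
 */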
static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5_flow_destination dest[2];
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct mlx5e_ipsec_rx *rx;
	struct mlx5_fc *counter;
	int err = 0;

	rx = rx_ft_get(mdev, ipsec, attrs->family, attrs->type);
	if (IS_ERR(rx))
		return PTR_ERR(rx);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto err_alloc;
	}

	if (attrs->family == AF_INET)
		setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
	else
		setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);

	setup_fte_spi(spec, attrs->spi, attrs->encap);
	if (!attrs->encap)
		setup_fte_esp(spec);
	setup_fte_no_frags(spec);
	setup_fte_upper_proto_match(spec, &attrs->upspec);

	if (!attrs->drop) {
		if (rx != ipsec->rx_esw)
			err = setup_modify_header(ipsec, attrs->type,
						  sa_entry->ipsec_obj_id | BIT(31),
						  XFRM_DEV_OFFLOAD_IN, &flow_act);
		else
			err = mlx5_esw_ipsec_rx_setup_modify_header(sa_entry, &flow_act);

		if (err)
			goto err_mod_header;
	}

	switch (attrs->type) {
	case XFRM_DEV_OFFLOAD_PACKET:
		err = setup_pkt_reformat(ipsec, attrs, &flow_act);
		if (err)
			goto err_pkt_reformat;
		break;
	default:
		break;
	}

	counter = mlx5_fc_create(mdev, true);
	if (IS_ERR(counter)) {
		err = PTR_ERR(counter);
		goto err_add_cnt;
	}
	flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
	flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
	flow_act.flags |= FLOW_ACT_NO_APPEND;
	flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
			   MLX5_FLOW_CONTEXT_ACTION_COUNT;
	if (attrs->drop)
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
	else
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[0].ft = rx->ft.status;
	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest[1].counter_id = mlx5_fc_id(counter);
	rule = mlx5_add_flow_rules(rx->ft.sa, spec, &flow_act, dest, 2);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "Failed to add RX IPsec rule, err=%d\n", err);
		goto err_add_flow;
	}
	if (attrs->type == XFRM_DEV_OFFLOAD_PACKET)
		err = rx_add_rule_drop_replay(sa_entry, rx);
	if (err)
		goto err_add_replay;

	err = rx_add_rule_drop_auth_trailer(sa_entry, rx);
	if (err)
		goto err_drop_reason;

	kvfree(spec);

	sa_entry->ipsec_rule.rule = rule;
	sa_entry->ipsec_rule.modify_hdr = flow_act.modify_hdr;
	sa_entry->ipsec_rule.fc = counter;
	sa_entry->ipsec_rule.pkt_reformat = flow_act.pkt_reformat;
	return 0;

err_drop_reason:
	if (sa_entry->ipsec_rule.replay.rule) {
		mlx5_del_flow_rules(sa_entry->ipsec_rule.replay.rule);
		mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.replay.fc);
	}
err_add_replay:
	mlx5_del_flow_rules(rule);
err_add_flow:
	mlx5_fc_destroy(mdev, counter);
err_add_cnt:
	if (flow_act.pkt_reformat)
		mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat);
err_pkt_reformat:
	if (flow_act.modify_hdr)
		mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
err_mod_header:
	kvfree(spec);
err_alloc:
	rx_ft_put(ipsec, attrs->family, attrs->type);
	return err;
}

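/* Per-SA TX steering rule: crypto offload matches on SPI, the ESP
 * header and metadata register A, while packet offload matches on the
 * reqid in reg_c4 (presumably stamped there by the policy rule's
 * modify header) and attaches the ESP encapsulation built by
 * setup_pkt_reformat(). Matching packets are encrypted, counted and
 * forwarded to the TX status table.
 */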
static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5_flow_destination dest[2];
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct mlx5e_ipsec_tx *tx;
	struct mlx5_fc *counter;
	int err;

	tx = tx_ft_get(mdev, ipsec, attrs->type);
	if (IS_ERR(tx))
		return PTR_ERR(tx);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto err_alloc;
	}

	if (attrs->family == AF_INET)
		setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
	else
		setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);

	setup_fte_no_frags(spec);
	setup_fte_upper_proto_match(spec, &attrs->upspec);

	switch (attrs->type) {
	case XFRM_DEV_OFFLOAD_CRYPTO:
		setup_fte_spi(spec, attrs->spi, false);
		setup_fte_esp(spec);
		setup_fte_reg_a(spec);
		break;
	case XFRM_DEV_OFFLOAD_PACKET:
		if (attrs->reqid)
			setup_fte_reg_c4(spec, attrs->reqid);
		err = setup_pkt_reformat(ipsec, attrs, &flow_act);
		if (err)
			goto err_pkt_reformat;
		break;
	default:
		break;
	}

	counter = mlx5_fc_create(mdev, true);
	if (IS_ERR(counter)) {
		err = PTR_ERR(counter);
		goto err_add_cnt;
	}

	flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
	flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
	flow_act.flags |= FLOW_ACT_NO_APPEND;
	flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT |
			   MLX5_FLOW_CONTEXT_ACTION_COUNT;
	if (attrs->drop)
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
	else
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	dest[0].ft = tx->ft.status;
	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest[1].counter_id = mlx5_fc_id(counter);
	rule = mlx5_add_flow_rules(tx->ft.sa, spec, &flow_act, dest, 2);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "Failed to add TX IPsec rule, err=%d\n", err);
		goto err_add_flow;
	}

	kvfree(spec);
	sa_entry->ipsec_rule.rule = rule;
	sa_entry->ipsec_rule.fc = counter;
	sa_entry->ipsec_rule.pkt_reformat = flow_act.pkt_reformat;
	return 0;

err_add_flow:
	mlx5_fc_destroy(mdev, counter);
err_add_cnt:
	if (flow_act.pkt_reformat)
		mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat);
err_pkt_reformat:
	kvfree(spec);
err_alloc:
	tx_ft_put(ipsec, attrs->type);
	return err;
}

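/* Translate an XFRM output policy into a rule in the TX policy table:
 * ALLOW forwards to the SA table (stamping the reqid via a modify
 * header so the SA rule can match on it), BLOCK drops and counts.
 */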
static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
{
	struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
	struct mlx5e_ipsec *ipsec = pol_entry->ipsec;
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_table *ft;
	struct mlx5e_ipsec_tx *tx;
	int err, dstn = 0;

	ft = tx_ft_get_policy(mdev, ipsec, attrs->prio, attrs->type);
	if (IS_ERR(ft))
		return PTR_ERR(ft);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto err_alloc;
	}

	tx = ipsec_tx(ipsec, attrs->type);
	if (attrs->family == AF_INET)
		setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
	else
		setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);

	setup_fte_no_frags(spec);
	setup_fte_upper_proto_match(spec, &attrs->upspec);

	switch (attrs->action) {
	case XFRM_POLICY_ALLOW:
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
		if (!attrs->reqid)
			break;

		err = setup_modify_header(ipsec, attrs->type, attrs->reqid,
					  XFRM_DEV_OFFLOAD_OUT, &flow_act);
		if (err)
			goto err_mod_header;
		break;
	case XFRM_POLICY_BLOCK:
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
				   MLX5_FLOW_CONTEXT_ACTION_COUNT;
		dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[dstn].counter_id = mlx5_fc_id(tx->fc->drop);
		dstn++;
		break;
	default:
		WARN_ON(true);
		err = -EINVAL;
		goto err_mod_header;
	}

	flow_act.flags |= FLOW_ACT_NO_APPEND;
	if (tx == ipsec->tx_esw && tx->chains)
		flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[dstn].ft = tx->ft.sa;
	dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dstn++;
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, dstn);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "Failed to add TX IPsec policy rule, err=%d\n", err);
		goto err_action;
	}

	kvfree(spec);
	pol_entry->ipsec_rule.rule = rule;
	pol_entry->ipsec_rule.modify_hdr = flow_act.modify_hdr;
	return 0;

err_action:
	if (flow_act.modify_hdr)
		mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
err_mod_header:
	kvfree(spec);
err_alloc:
	tx_ft_put_policy(ipsec, attrs->prio, attrs->type);
	return err;
}

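/* Translate an XFRM input policy into a rule in the RX policy table:
 * ALLOW forwards to the SA table, BLOCK drops and counts the packet.
 */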
static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
{
	struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
	struct mlx5e_ipsec *ipsec = pol_entry->ipsec;
	struct mlx5_flow_destination dest[2];
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_table *ft;
	struct mlx5e_ipsec_rx *rx;
	int err, dstn = 0;

	ft = rx_ft_get_policy(mdev, pol_entry->ipsec, attrs->family, attrs->prio,
			      attrs->type);
	if (IS_ERR(ft))
		return PTR_ERR(ft);

	rx = ipsec_rx(pol_entry->ipsec, attrs->family, attrs->type);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto err_alloc;
	}

	if (attrs->family == AF_INET)
		setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
	else
		setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);

	setup_fte_no_frags(spec);
	setup_fte_upper_proto_match(spec, &attrs->upspec);

	switch (attrs->action) {
	case XFRM_POLICY_ALLOW:
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
		break;
	case XFRM_POLICY_BLOCK:
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
		dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[dstn].counter_id = mlx5_fc_id(rx->fc->drop);
		dstn++;
		break;
	default:
		WARN_ON(true);
		err = -EINVAL;
		goto err_action;
	}

	flow_act.flags |= FLOW_ACT_NO_APPEND;
	if (rx == ipsec->rx_esw && rx->chains)
		flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[dstn].ft = rx->ft.sa;
	dstn++;
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, dstn);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "Failed to add RX IPsec policy rule, err=%d\n", err);
		goto err_action;
	}

	kvfree(spec);
	pol_entry->ipsec_rule.rule = rule;
	return 0;

err_action:
	kvfree(spec);
err_alloc:
	rx_ft_put_policy(pol_entry->ipsec, attrs->family, attrs->prio, attrs->type);
	return err;
}

static void ipsec_fs_destroy_single_counter(struct mlx5_core_dev *mdev,
					    struct mlx5e_ipsec_fc *fc)
{
	mlx5_fc_destroy(mdev, fc->drop);
	mlx5_fc_destroy(mdev, fc->cnt);
	kfree(fc);
}

static void ipsec_fs_destroy_counters(struct mlx5e_ipsec *ipsec)
{
	struct mlx5_core_dev *mdev = ipsec->mdev;

	ipsec_fs_destroy_single_counter(mdev, ipsec->tx->fc);
	ipsec_fs_destroy_single_counter(mdev, ipsec->rx_ipv4->fc);
	if (ipsec->is_uplink_rep) {
		ipsec_fs_destroy_single_counter(mdev, ipsec->tx_esw->fc);
		ipsec_fs_destroy_single_counter(mdev, ipsec->rx_esw->fc);
	}
}

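/* Each steering path owns a pair of flow counters: cnt for traffic
 * that passed IPsec processing and drop for traffic that was dropped.
 */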
static struct mlx5e_ipsec_fc *ipsec_fs_init_single_counter(struct mlx5_core_dev *mdev)
{
	struct mlx5e_ipsec_fc *fc;
	struct mlx5_fc *counter;
	int err;

	fc = kzalloc(sizeof(*fc), GFP_KERNEL);
	if (!fc)
		return ERR_PTR(-ENOMEM);

	counter = mlx5_fc_create(mdev, false);
	if (IS_ERR(counter)) {
		err = PTR_ERR(counter);
		goto err_cnt;
	}
	fc->cnt = counter;

	counter = mlx5_fc_create(mdev, false);
	if (IS_ERR(counter)) {
		err = PTR_ERR(counter);
		goto err_drop;
	}
	fc->drop = counter;

	return fc;

err_drop:
	mlx5_fc_destroy(mdev, fc->cnt);
err_cnt:
	kfree(fc);
	return ERR_PTR(err);
}

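/* Create the counter pairs for the NIC TX/RX paths and, on the uplink
 * representor, for the eswitch TX/RX paths as well. Teardown happens
 * in ipsec_fs_destroy_counters() in the reverse order.
 */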
static int ipsec_fs_init_counters(struct mlx5e_ipsec *ipsec)
{
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5e_ipsec_fc *fc;
	int err;

	fc = ipsec_fs_init_single_counter(mdev);
	if (IS_ERR(fc)) {
		err = PTR_ERR(fc);
		goto err_rx_cnt;
	}
	ipsec->rx_ipv4->fc = fc;

	fc = ipsec_fs_init_single_counter(mdev);
	if (IS_ERR(fc)) {
		err = PTR_ERR(fc);
		goto err_tx_cnt;
	}
	ipsec->tx->fc = fc;

	if (ipsec->is_uplink_rep) {
		fc = ipsec_fs_init_single_counter(mdev);
		if (IS_ERR(fc)) {
			err = PTR_ERR(fc);
			goto err_rx_esw_cnt;
		}
		ipsec->rx_esw->fc = fc;

		fc = ipsec_fs_init_single_counter(mdev);
		if (IS_ERR(fc)) {
			err = PTR_ERR(fc);
			goto err_tx_esw_cnt;
		}
		ipsec->tx_esw->fc = fc;
	}

	/* Both IPv4 and IPv6 point to the same flow counters struct. */
	ipsec->rx_ipv6->fc = ipsec->rx_ipv4->fc;
	return 0;

err_tx_esw_cnt:
	ipsec_fs_destroy_single_counter(mdev, ipsec->rx_esw->fc);
err_rx_esw_cnt:
	ipsec_fs_destroy_single_counter(mdev, ipsec->tx->fc);
err_tx_cnt:
	ipsec_fs_destroy_single_counter(mdev, ipsec->rx_ipv4->fc);
err_rx_cnt:
	return err;
}

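/* Fill the ethtool HW stats from the flow counters. On the uplink
 * representor the eswitch counters are added on top of the NIC ones;
 * a failed query just leaves that contribution out of the sums.
 */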
void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void *ipsec_stats)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_ipsec *ipsec = priv->ipsec;
	struct mlx5e_ipsec_hw_stats *stats;
	struct mlx5e_ipsec_fc *fc;
	u64 packets, bytes;

	stats = (struct mlx5e_ipsec_hw_stats *)ipsec_stats;

	stats->ipsec_rx_pkts = 0;
	stats->ipsec_rx_bytes = 0;
	stats->ipsec_rx_drop_pkts = 0;
	stats->ipsec_rx_drop_bytes = 0;
	stats->ipsec_tx_pkts = 0;
	stats->ipsec_tx_bytes = 0;
	stats->ipsec_tx_drop_pkts = 0;
	stats->ipsec_tx_drop_bytes = 0;

	fc = ipsec->rx_ipv4->fc;
	mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_rx_pkts, &stats->ipsec_rx_bytes);
	mlx5_fc_query(mdev, fc->drop, &stats->ipsec_rx_drop_pkts,
		      &stats->ipsec_rx_drop_bytes);

	fc = ipsec->tx->fc;
	mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_tx_pkts, &stats->ipsec_tx_bytes);
	mlx5_fc_query(mdev, fc->drop, &stats->ipsec_tx_drop_pkts,
		      &stats->ipsec_tx_drop_bytes);

	if (ipsec->is_uplink_rep) {
		fc = ipsec->rx_esw->fc;
		if (!mlx5_fc_query(mdev, fc->cnt, &packets, &bytes)) {
			stats->ipsec_rx_pkts += packets;
			stats->ipsec_rx_bytes += bytes;
		}

		if (!mlx5_fc_query(mdev, fc->drop, &packets, &bytes)) {
			stats->ipsec_rx_drop_pkts += packets;
			stats->ipsec_rx_drop_bytes += bytes;
		}

		fc = ipsec->tx_esw->fc;
		if (!mlx5_fc_query(mdev, fc->cnt, &packets, &bytes)) {
			stats->ipsec_tx_pkts += packets;
			stats->ipsec_tx_bytes += bytes;
		}

		if (!mlx5_fc_query(mdev, fc->drop, &packets, &bytes)) {
			stats->ipsec_tx_drop_pkts += packets;
			stats->ipsec_tx_drop_bytes += bytes;
		}
	}
}

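/* TC and IPsec packet offload are mutually exclusive. Installing an
 * IPsec rule bumps num_block_tc to fence off TC offload, and fails
 * with -EBUSY if TC got there first (num_block_ipsec is non-zero).
 * The eswitch lock presumably keeps the check and increment atomic
 * with eswitch mode changes.
 */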
#ifdef CONFIG_MLX5_ESWITCH
static int mlx5e_ipsec_block_tc_offload(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	int err = 0;

	if (esw) {
		err = mlx5_esw_lock(esw);
		if (err)
			return err;
	}

	if (mdev->num_block_ipsec) {
		err = -EBUSY;
		goto unlock;
	}

	mdev->num_block_tc++;

unlock:
	if (esw)
		mlx5_esw_unlock(esw);

	return err;
}
#else
static int mlx5e_ipsec_block_tc_offload(struct mlx5_core_dev *mdev)
{
	if (mdev->num_block_ipsec)
		return -EBUSY;

	mdev->num_block_tc++;
	return 0;
}
#endif

static void mlx5e_ipsec_unblock_tc_offload(struct mlx5_core_dev *mdev)
{
	mdev->num_block_tc--;
}

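/* Offload an SA: packet offload must first block TC, then the rule is
 * installed in the direction-specific SA table; the TC block is
 * rolled back on failure.
 */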
int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	int err;

	if (sa_entry->attrs.type == XFRM_DEV_OFFLOAD_PACKET) {
		err = mlx5e_ipsec_block_tc_offload(sa_entry->ipsec->mdev);
		if (err)
			return err;
	}

	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
		err = tx_add_rule(sa_entry);
	else
		err = rx_add_rule(sa_entry);

	if (err)
		goto err_out;

	return 0;

err_out:
	if (sa_entry->attrs.type == XFRM_DEV_OFFLOAD_PACKET)
		mlx5e_ipsec_unblock_tc_offload(sa_entry->ipsec->mdev);
	return err;
}

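/* Tear an SA down in reverse order of creation. The RX path also owns
 * the auth/trailer (and, for packet offload, replay) drop rules and
 * the eswitch SA-ID mapping, all released before the flow table
 * reference is dropped.
 */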
void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);

	mlx5_del_flow_rules(ipsec_rule->rule);
	mlx5_fc_destroy(mdev, ipsec_rule->fc);
	if (ipsec_rule->pkt_reformat)
		mlx5_packet_reformat_dealloc(mdev, ipsec_rule->pkt_reformat);

	if (sa_entry->attrs.type == XFRM_DEV_OFFLOAD_PACKET)
		mlx5e_ipsec_unblock_tc_offload(mdev);

	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT) {
		tx_ft_put(sa_entry->ipsec, sa_entry->attrs.type);
		return;
	}

	if (ipsec_rule->modify_hdr)
		mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);

	mlx5_del_flow_rules(ipsec_rule->trailer.rule);
	mlx5_fc_destroy(mdev, ipsec_rule->trailer.fc);

	mlx5_del_flow_rules(ipsec_rule->auth.rule);
	mlx5_fc_destroy(mdev, ipsec_rule->auth.fc);

	if (ipsec_rule->replay.rule) {
		mlx5_del_flow_rules(ipsec_rule->replay.rule);
		mlx5_fc_destroy(mdev, ipsec_rule->replay.fc);
	}
	mlx5_esw_ipsec_rx_id_mapping_remove(sa_entry);
	rx_ft_put(sa_entry->ipsec, sa_entry->attrs.family, sa_entry->attrs.type);
}

int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
{
	int err;

	err = mlx5e_ipsec_block_tc_offload(pol_entry->ipsec->mdev);
	if (err)
		return err;

	if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
		err = tx_add_policy(pol_entry);
	else
		err = rx_add_policy(pol_entry);

	if (err)
		goto err_out;

	return 0;

err_out:
	mlx5e_ipsec_unblock_tc_offload(pol_entry->ipsec->mdev);
	return err;
}

void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
{
	struct mlx5e_ipsec_rule *ipsec_rule = &pol_entry->ipsec_rule;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);

	mlx5_del_flow_rules(ipsec_rule->rule);

	mlx5e_ipsec_unblock_tc_offload(pol_entry->ipsec->mdev);

	if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
		rx_ft_put_policy(pol_entry->ipsec, pol_entry->attrs.family,
				 pol_entry->attrs.prio, pol_entry->attrs.type);
		return;
	}

	if (ipsec_rule->modify_hdr)
		mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);

	tx_ft_put_policy(pol_entry->ipsec, pol_entry->attrs.prio, pol_entry->attrs.type);
}

void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
{
	if (!ipsec->tx)
		return;

	if (ipsec->roce)
		mlx5_ipsec_fs_roce_cleanup(ipsec->roce);

	ipsec_fs_destroy_counters(ipsec);
	mutex_destroy(&ipsec->tx->ft.mutex);
	WARN_ON(ipsec->tx->ft.refcnt);
	kfree(ipsec->tx);

	mutex_destroy(&ipsec->rx_ipv4->ft.mutex);
	WARN_ON(ipsec->rx_ipv4->ft.refcnt);
	kfree(ipsec->rx_ipv4);

	mutex_destroy(&ipsec->rx_ipv6->ft.mutex);
	WARN_ON(ipsec->rx_ipv6->ft.refcnt);
	kfree(ipsec->rx_ipv6);

	if (ipsec->is_uplink_rep) {
		xa_destroy(&ipsec->ipsec_obj_id_map);

		mutex_destroy(&ipsec->tx_esw->ft.mutex);
		WARN_ON(ipsec->tx_esw->ft.refcnt);
		kfree(ipsec->tx_esw);

		mutex_destroy(&ipsec->rx_esw->ft.mutex);
		WARN_ON(ipsec->rx_esw->ft.refcnt);
		kfree(ipsec->rx_esw);
	}
}

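/* Allocate the TX/RX steering contexts. The egress namespace (and the
 * FDB namespace on the uplink representor) is resolved up front, the
 * flow counters are created, and RoCE IPsec support is initialized
 * only when the device advertises the RoCE capability and is not an
 * uplink representor.
 */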
int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec,
			      struct mlx5_devcom_comp_dev **devcom)
{
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5_flow_namespace *ns, *ns_esw;
	int err = -ENOMEM;

	ns = mlx5_get_flow_namespace(ipsec->mdev,
				     MLX5_FLOW_NAMESPACE_EGRESS_IPSEC);
	if (!ns)
		return -EOPNOTSUPP;

	if (ipsec->is_uplink_rep) {
		ns_esw = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_FDB);
		if (!ns_esw)
			return -EOPNOTSUPP;

		ipsec->tx_esw = kzalloc(sizeof(*ipsec->tx_esw), GFP_KERNEL);
		if (!ipsec->tx_esw)
			return -ENOMEM;

		ipsec->rx_esw = kzalloc(sizeof(*ipsec->rx_esw), GFP_KERNEL);
		if (!ipsec->rx_esw)
			goto err_rx_esw;
	}

	ipsec->tx = kzalloc(sizeof(*ipsec->tx), GFP_KERNEL);
	if (!ipsec->tx)
		goto err_tx;

	ipsec->rx_ipv4 = kzalloc(sizeof(*ipsec->rx_ipv4), GFP_KERNEL);
	if (!ipsec->rx_ipv4)
		goto err_rx_ipv4;

	ipsec->rx_ipv6 = kzalloc(sizeof(*ipsec->rx_ipv6), GFP_KERNEL);
	if (!ipsec->rx_ipv6)
		goto err_rx_ipv6;

	err = ipsec_fs_init_counters(ipsec);
	if (err)
		goto err_counters;

	mutex_init(&ipsec->tx->ft.mutex);
	mutex_init(&ipsec->rx_ipv4->ft.mutex);
	mutex_init(&ipsec->rx_ipv6->ft.mutex);
	ipsec->tx->ns = ns;

	if (ipsec->is_uplink_rep) {
		mutex_init(&ipsec->tx_esw->ft.mutex);
		mutex_init(&ipsec->rx_esw->ft.mutex);
		ipsec->tx_esw->ns = ns_esw;
		xa_init_flags(&ipsec->ipsec_obj_id_map, XA_FLAGS_ALLOC1);
	} else if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ROCE) {
		ipsec->roce = mlx5_ipsec_fs_roce_init(mdev, devcom);
	} else {
		mlx5_core_warn(mdev, "IPsec was initialized without RoCE support\n");
	}

	return 0;

err_counters:
	kfree(ipsec->rx_ipv6);
err_rx_ipv6:
	kfree(ipsec->rx_ipv4);
err_rx_ipv4:
	kfree(ipsec->tx);
err_tx:
	kfree(ipsec->rx_esw);
err_rx_esw:
	kfree(ipsec->tx_esw);
	return err;
}

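/* Update an SA's rules make-before-break: a shadow copy of the entry
 * (with cleared rule handles) installs the new rules first, the old
 * rules are removed only once that succeeds, and the shadow then
 * replaces the original entry.
 */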
void mlx5e_accel_ipsec_fs_modify(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5e_ipsec_sa_entry sa_entry_shadow = {};
	int err;

	memcpy(&sa_entry_shadow, sa_entry, sizeof(*sa_entry));
	memset(&sa_entry_shadow.ipsec_rule, 0x00, sizeof(sa_entry->ipsec_rule));

	err = mlx5e_accel_ipsec_fs_add_rule(&sa_entry_shadow);
	if (err)
		return;

	mlx5e_accel_ipsec_fs_del_rule(sa_entry);
	memcpy(sa_entry, &sa_entry_shadow, sizeof(*sa_entry));
}

bool mlx5e_ipsec_fs_tunnel_enabled(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
	struct mlx5e_ipsec_rx *rx;
	struct mlx5e_ipsec_tx *tx;

	rx = ipsec_rx(sa_entry->ipsec, attrs->family, attrs->type);
	tx = ipsec_tx(sa_entry->ipsec, attrs->type);
	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
		return tx->allow_tunnel_mode;

	return rx->allow_tunnel_mode;
}

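/* Multi-port vhca events are handled asynchronously on the slave's
 * workqueue. The master waits on its completion in
 * mlx5e_ipsec_send_event(); it is signalled here when the slave has
 * no IPsec support, and presumably by ipsec_mpv_work_handler()
 * otherwise.
 */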
void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *slave_priv,
				  struct mlx5e_priv *master_priv)
{
	struct mlx5e_ipsec_mpv_work *work;

	reinit_completion(&master_priv->ipsec->comp);

	if (!slave_priv->ipsec) {
		complete(&master_priv->ipsec->comp);
		return;
	}

	work = &slave_priv->ipsec->mpv_work;

	INIT_WORK(&work->work, ipsec_mpv_work_handler);
	work->event = event;
	work->slave_priv = slave_priv;
	work->master_priv = master_priv;
	queue_work(slave_priv->ipsec->wq, &work->work);
}

void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event)
{
	if (!priv->ipsec)
		return; /* IPsec not supported */

	mlx5_devcom_send_event(priv->devcom, event, event, priv);
	wait_for_completion(&priv->ipsec->comp);
}
2418