/*
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <crypto/internal/geniv.h>
#include <crypto/aead.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <net/netevent.h>

#include "en.h"
#include "eswitch.h"
#include "ipsec.h"
#include "ipsec_rxtx.h"
#include "en_rep.h"

#define MLX5_IPSEC_RESCHED msecs_to_jiffies(1000)
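/* SAs doing packet offload in tunnel mode are marked in the SADB xarray so
 * that the netevent notifier below can walk only those entries when a
 * neighbour changes.
 */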
#define MLX5E_IPSEC_TUNNEL_SA XA_MARK_1

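/* The xfrm core hands back the pointers we stash in xso.offload_handle and
 * xdo.offload_handle (set in mlx5e_xfrm_add_state() and
 * mlx5e_xfrm_add_policy() below), so these helpers simply cast them back to
 * the driver's SA/policy entries.
 */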
static struct mlx5e_ipsec_sa_entry *to_ipsec_sa_entry(struct xfrm_state *x)
{
	return (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
}

static struct mlx5e_ipsec_pol_entry *to_ipsec_pol_entry(struct xfrm_policy *x)
{
	return (struct mlx5e_ipsec_pol_entry *)x->xdo.offload_handle;
}

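/* Periodic work (rescheduled every MLX5_IPSEC_RESCHED) that lets the xfrm
 * core evaluate soft/hard lifetime limits in software and switches the
 * offloaded rule to drop once the state has expired.
 */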
static void mlx5e_ipsec_handle_sw_limits(struct work_struct *_work)
{
	struct mlx5e_ipsec_dwork *dwork =
		container_of(_work, struct mlx5e_ipsec_dwork, dwork.work);
	struct mlx5e_ipsec_sa_entry *sa_entry = dwork->sa_entry;
	struct xfrm_state *x = sa_entry->x;

	if (sa_entry->attrs.drop)
		return;

	spin_lock_bh(&x->lock);
	if (x->km.state == XFRM_STATE_EXPIRED) {
		sa_entry->attrs.drop = true;
		spin_unlock_bh(&x->lock);

		mlx5e_accel_ipsec_fs_modify(sa_entry);
		return;
	}

	if (x->km.state != XFRM_STATE_VALID) {
		spin_unlock_bh(&x->lock);
		return;
	}

	xfrm_state_check_expire(x);
	spin_unlock_bh(&x->lock);

	queue_delayed_work(sa_entry->ipsec->wq, &dwork->dwork,
			   MLX5_IPSEC_RESCHED);
}

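/* Refresh the cached ESN state from the xfrm state. Returns true when the
 * ESN overlap flag flips, i.e. the hardware context must be updated with the
 * new esn/esn_msb values (see mlx5e_xfrm_advance_esn_state()).
 */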
static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct xfrm_state *x = sa_entry->x;
	u32 seq_bottom = 0;
	u32 esn, esn_msb;
	u8 overlap;

	switch (x->xso.type) {
	case XFRM_DEV_OFFLOAD_PACKET:
		switch (x->xso.dir) {
		case XFRM_DEV_OFFLOAD_IN:
			esn = x->replay_esn->seq;
			esn_msb = x->replay_esn->seq_hi;
			break;
		case XFRM_DEV_OFFLOAD_OUT:
			esn = x->replay_esn->oseq;
			esn_msb = x->replay_esn->oseq_hi;
			break;
		default:
			WARN_ON(true);
			return false;
		}
		break;
	case XFRM_DEV_OFFLOAD_CRYPTO:
		/* Already parsed by XFRM core */
		esn = x->replay_esn->seq;
		break;
	default:
		WARN_ON(true);
		return false;
	}

	overlap = sa_entry->esn_state.overlap;

	if (esn >= x->replay_esn->replay_window)
		seq_bottom = esn - x->replay_esn->replay_window + 1;

	if (x->xso.type == XFRM_DEV_OFFLOAD_CRYPTO)
		esn_msb = xfrm_replay_seqhi(x, htonl(seq_bottom));

	if (sa_entry->esn_state.esn_msb)
		sa_entry->esn_state.esn = esn;
	else
		/* According to RFC4303, section "3.3.3. Sequence Number Generation",
		 * the first packet sent using a given SA will contain a sequence
		 * number of 1.
		 */
		sa_entry->esn_state.esn = max_t(u32, esn, 1);
	sa_entry->esn_state.esn_msb = esn_msb;

	if (unlikely(overlap && seq_bottom < MLX5E_IPSEC_ESN_SCOPE_MID)) {
		sa_entry->esn_state.overlap = 0;
		return true;
	} else if (unlikely(!overlap &&
			    (seq_bottom >= MLX5E_IPSEC_ESN_SCOPE_MID))) {
		sa_entry->esn_state.overlap = 1;
		return true;
	}

	return false;
}

static void mlx5e_ipsec_init_limits(struct mlx5e_ipsec_sa_entry *sa_entry,
				    struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	struct xfrm_state *x = sa_entry->x;
	s64 start_value, n;

	attrs->lft.hard_packet_limit = x->lft.hard_packet_limit;
	attrs->lft.soft_packet_limit = x->lft.soft_packet_limit;
	if (x->lft.soft_packet_limit == XFRM_INF)
		return;

	/* Compute hard limit initial value and number of rounds.
	 *
	 * The counting pattern of hardware counter goes:
	 *                value  -> 2^31-1
	 *      2^31  | (2^31-1) -> 2^31-1
	 *      2^31  | (2^31-1) -> 2^31-1
	 *      [..]
	 *      2^31  | (2^31-1) -> 0
	 *
	 * The pattern is created by using an ASO operation to atomically set
	 * bit 31 after the down counter clears bit 31. This is effectively an
	 * atomic addition of 2**31 to the counter.
	 *
	 * We wish to configure the counter, within the above pattern, so that
	 * when it reaches 0, it has hit the hard limit. This is defined by this
	 * system of equations:
	 *
	 *      hard_limit == start_value + n * 2^31
	 *      n >= 0
	 *      start_value < 2^32, start_value >= 0
	 *
	 * These equations are not single-solution, there are often two choices:
	 *      hard_limit == start_value + n * 2^31
	 *      hard_limit == (start_value+2^31) + (n-1) * 2^31
	 *
	 * The algorithm selects the solution that keeps the counter value
	 * above 2^31 until the final iteration.
	 */

	/* Start by estimating n and compute start_value */
	n = attrs->lft.hard_packet_limit / BIT_ULL(31);
	start_value = attrs->lft.hard_packet_limit - n * BIT_ULL(31);

	/* Choose the best of the two solutions: */
	if (n >= 1)
		n -= 1;

	/* Computed values solve the system of equations: */
	start_value = attrs->lft.hard_packet_limit - n * BIT_ULL(31);

	/* The best solution means: when there are multiple iterations we must
	 * start above 2^31 and count down to 2**31 to get the interrupt.
	 */
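	/* Illustrative example (not from the HW spec): for a hard limit of
	 * 3 * 2^31 + 5 packets, the first estimate gives n = 3 and
	 * start_value = 5. Stepping back to n = 2 gives start_value = 2^31 + 5,
	 * so the counter is loaded with 2^31 + 5 and the ASO adds 2^31 twice:
	 * (2^31 + 5) + 2 * 2^31 equals the requested hard limit, while the
	 * counter stays above 2^31 until the final round.
	 */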
	attrs->lft.hard_packet_limit = lower_32_bits(start_value);
	attrs->lft.numb_rounds_hard = (u64)n;

	/* Compute soft limit initial value and number of rounds.
	 *
	 * The soft_limit is achieved by adjusting the counter's
	 * interrupt_value. This is embedded in the counting pattern created by
	 * hard packet calculations above.
	 *
	 * We wish to compute the interrupt_value for the soft_limit. This is
	 * defined by this system of equations:
	 *
	 *      soft_limit == start_value - soft_value + n * 2^31
	 *      n >= 0
	 *      soft_value < 2^32, soft_value >= 0
	 *      for n == 0 start_value > soft_value
	 *
	 * As with compute_hard_n_value() the equations are not single-solution.
	 * The algorithm selects the solution that has:
	 *      2^30 <= soft_limit < 2^31 + 2^30
	 * for the interior iterations, which guarantees a large guard band
	 * around the counter hard limit and next interrupt.
	 */

	/* Start by estimating n and compute soft_value */
	n = (x->lft.soft_packet_limit - attrs->lft.hard_packet_limit) / BIT_ULL(31);
	start_value = attrs->lft.hard_packet_limit + n * BIT_ULL(31) -
		      x->lft.soft_packet_limit;

	/* Compare against constraints and adjust n */
	if (n < 0)
		n = 0;
	else if (start_value >= BIT_ULL(32))
		n -= 1;
	else if (start_value < 0)
		n += 1;

	/* Choose the best of the two solutions: */
	start_value = attrs->lft.hard_packet_limit + n * BIT_ULL(31) - start_value;
	if (n != attrs->lft.numb_rounds_hard && start_value < BIT_ULL(30))
		n += 1;

	/* Note that the upper limit of soft_value happens naturally because we
	 * always select the lowest soft_value.
	 */

	/* Computed values solve the system of equations: */
	start_value = attrs->lft.hard_packet_limit + n * BIT_ULL(31) - start_value;

	/* The best solution means: when there are multiple iterations we must
	 * not fall below 2^30 as that would get too close to the false
	 * hard_limit and when we reach an interior iteration for soft_limit it
	 * has to be far away from 2**32-1 which is the counter reset point
	 * after the +2^31 to accommodate latency.
	 */
	attrs->lft.soft_packet_limit = lower_32_bits(start_value);
	attrs->lft.numb_rounds_soft = (u64)n;
}

static void mlx5e_ipsec_init_macs(struct mlx5e_ipsec_sa_entry *sa_entry,
				  struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	struct xfrm_state *x = sa_entry->x;
	struct net_device *netdev;
	struct neighbour *n;
	u8 addr[ETH_ALEN];
	const void *pkey;
	u8 *dst, *src;

	if (attrs->mode != XFRM_MODE_TUNNEL ||
	    attrs->type != XFRM_DEV_OFFLOAD_PACKET)
		return;

	netdev = x->xso.real_dev;

	mlx5_query_mac_address(mdev, addr);
	switch (attrs->dir) {
	case XFRM_DEV_OFFLOAD_IN:
		src = attrs->dmac;
		dst = attrs->smac;
		pkey = &attrs->saddr.a4;
		break;
	case XFRM_DEV_OFFLOAD_OUT:
		src = attrs->smac;
		dst = attrs->dmac;
		pkey = &attrs->daddr.a4;
		break;
	default:
		return;
	}

	ether_addr_copy(src, addr);
	n = neigh_lookup(&arp_tbl, pkey, netdev);
	if (!n) {
		n = neigh_create(&arp_tbl, pkey, netdev);
		if (IS_ERR(n))
			return;
		neigh_event_send(n, NULL);
		attrs->drop = true;
	} else {
		neigh_ha_snapshot(addr, n, netdev);
		ether_addr_copy(dst, addr);
	}
	neigh_release(n);
}

void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
					struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	struct xfrm_state *x = sa_entry->x;
	struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm;
	struct aead_geniv_ctx *geniv_ctx;
	struct crypto_aead *aead;
	unsigned int crypto_data_len, key_len;
	int ivsize;

	memset(attrs, 0, sizeof(*attrs));

	/* key */
	crypto_data_len = (x->aead->alg_key_len + 7) / 8;
	key_len = crypto_data_len - 4; /* 4 bytes salt at end */

	memcpy(aes_gcm->aes_key, x->aead->alg_key, key_len);
	aes_gcm->key_len = key_len * 8;

	/* salt and seq_iv */
	aead = x->data;
	geniv_ctx = crypto_aead_ctx(aead);
	ivsize = crypto_aead_ivsize(aead);
	memcpy(&aes_gcm->seq_iv, &geniv_ctx->salt, ivsize);
	memcpy(&aes_gcm->salt, x->aead->alg_key + key_len,
	       sizeof(aes_gcm->salt));

	attrs->authsize = crypto_aead_authsize(aead) / 4; /* in dwords */

	/* iv len */
	aes_gcm->icv_len = x->aead->alg_icv_len;

	attrs->dir = x->xso.dir;

	/* esn */
	if (x->props.flags & XFRM_STATE_ESN) {
		attrs->replay_esn.trigger = true;
		attrs->replay_esn.esn = sa_entry->esn_state.esn;
		attrs->replay_esn.esn_msb = sa_entry->esn_state.esn_msb;
		attrs->replay_esn.overlap = sa_entry->esn_state.overlap;
		if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
			goto skip_replay_window;

		switch (x->replay_esn->replay_window) {
		case 32:
			attrs->replay_esn.replay_window =
				MLX5_IPSEC_ASO_REPLAY_WIN_32BIT;
			break;
		case 64:
			attrs->replay_esn.replay_window =
				MLX5_IPSEC_ASO_REPLAY_WIN_64BIT;
			break;
		case 128:
			attrs->replay_esn.replay_window =
				MLX5_IPSEC_ASO_REPLAY_WIN_128BIT;
			break;
		case 256:
			attrs->replay_esn.replay_window =
				MLX5_IPSEC_ASO_REPLAY_WIN_256BIT;
			break;
		default:
			WARN_ON(true);
			return;
		}
	}

skip_replay_window:
	/* spi */
	attrs->spi = be32_to_cpu(x->id.spi);

	/* source , destination ips */
	memcpy(&attrs->saddr, x->props.saddr.a6, sizeof(attrs->saddr));
	memcpy(&attrs->daddr, x->id.daddr.a6, sizeof(attrs->daddr));
	attrs->family = x->props.family;
	attrs->type = x->xso.type;
	attrs->reqid = x->props.reqid;
	attrs->upspec.dport = ntohs(x->sel.dport);
	attrs->upspec.dport_mask = ntohs(x->sel.dport_mask);
	attrs->upspec.sport = ntohs(x->sel.sport);
	attrs->upspec.sport_mask = ntohs(x->sel.sport_mask);
	attrs->upspec.proto = x->sel.proto;
	attrs->mode = x->props.mode;

	mlx5e_ipsec_init_limits(sa_entry, attrs);
	mlx5e_ipsec_init_macs(sa_entry, attrs);

	if (x->encap) {
		attrs->encap = true;
		attrs->sport = x->encap->encap_sport;
		attrs->dport = x->encap->encap_dport;
	}
}

static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
				     struct xfrm_state *x,
				     struct netlink_ext_ack *extack)
{
	if (x->props.aalgo != SADB_AALG_NONE) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload authenticated xfrm states");
		return -EINVAL;
	}
	if (x->props.ealgo != SADB_X_EALG_AES_GCM_ICV16) {
		NL_SET_ERR_MSG_MOD(extack, "Only AES-GCM-ICV16 xfrm state may be offloaded");
		return -EINVAL;
	}
	if (x->props.calgo != SADB_X_CALG_NONE) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload compressed xfrm states");
		return -EINVAL;
	}
	if (x->props.flags & XFRM_STATE_ESN &&
	    !(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ESN)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload ESN xfrm states");
		return -EINVAL;
	}
	if (x->props.family != AF_INET &&
	    x->props.family != AF_INET6) {
		NL_SET_ERR_MSG_MOD(extack, "Only IPv4/6 xfrm states may be offloaded");
		return -EINVAL;
	}
	if (x->id.proto != IPPROTO_ESP) {
		NL_SET_ERR_MSG_MOD(extack, "Only ESP xfrm state may be offloaded");
		return -EINVAL;
	}
	if (x->encap) {
		if (!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ESPINUDP)) {
			NL_SET_ERR_MSG_MOD(extack, "Encapsulation is not supported");
			return -EINVAL;
		}

		if (x->encap->encap_type != UDP_ENCAP_ESPINUDP) {
			NL_SET_ERR_MSG_MOD(extack, "Encapsulation other than UDP is not supported");
			return -EINVAL;
		}

		if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET) {
			NL_SET_ERR_MSG_MOD(extack, "Encapsulation is supported in packet offload mode only");
			return -EINVAL;
		}

		if (x->props.mode != XFRM_MODE_TRANSPORT) {
			NL_SET_ERR_MSG_MOD(extack, "Encapsulation is supported in transport mode only");
			return -EINVAL;
		}
	}
	if (!x->aead) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states without aead");
		return -EINVAL;
	}
	if (x->aead->alg_icv_len != 128) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with AEAD ICV length other than 128bit");
		return -EINVAL;
	}
	if ((x->aead->alg_key_len != 128 + 32) &&
	    (x->aead->alg_key_len != 256 + 32)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with AEAD key length other than 128/256 bit");
		return -EINVAL;
	}
	if (x->tfcpad) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with tfc padding");
		return -EINVAL;
	}
	if (!x->geniv) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states without geniv");
		return -EINVAL;
	}
	if (strcmp(x->geniv, "seqiv")) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with geniv other than seqiv");
		return -EINVAL;
	}

	if (x->sel.proto != IPPROTO_IP && x->sel.proto != IPPROTO_UDP &&
	    x->sel.proto != IPPROTO_TCP) {
		NL_SET_ERR_MSG_MOD(extack, "Device does not support upper protocol other than TCP/UDP");
		return -EINVAL;
	}

	if (x->props.mode != XFRM_MODE_TRANSPORT && x->props.mode != XFRM_MODE_TUNNEL) {
		NL_SET_ERR_MSG_MOD(extack, "Only transport and tunnel xfrm states may be offloaded");
		return -EINVAL;
	}

	switch (x->xso.type) {
	case XFRM_DEV_OFFLOAD_CRYPTO:
		if (!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_CRYPTO)) {
			NL_SET_ERR_MSG_MOD(extack, "Crypto offload is not supported");
			return -EINVAL;
		}

		break;
	case XFRM_DEV_OFFLOAD_PACKET:
		if (!(mlx5_ipsec_device_caps(mdev) &
		      MLX5_IPSEC_CAP_PACKET_OFFLOAD)) {
			NL_SET_ERR_MSG_MOD(extack, "Packet offload is not supported");
			return -EINVAL;
		}

		if (x->props.mode == XFRM_MODE_TUNNEL &&
		    !(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)) {
			NL_SET_ERR_MSG_MOD(extack, "Packet offload is not supported for tunnel mode");
			return -EINVAL;
		}

		if (x->replay_esn && x->xso.dir == XFRM_DEV_OFFLOAD_IN &&
		    x->replay_esn->replay_window != 32 &&
		    x->replay_esn->replay_window != 64 &&
		    x->replay_esn->replay_window != 128 &&
		    x->replay_esn->replay_window != 256) {
			NL_SET_ERR_MSG_MOD(extack, "Unsupported replay window size");
			return -EINVAL;
		}

		if (!x->props.reqid) {
			NL_SET_ERR_MSG_MOD(extack, "Cannot offload without reqid");
			return -EINVAL;
		}

		if (x->lft.soft_byte_limit >= x->lft.hard_byte_limit &&
		    x->lft.hard_byte_limit != XFRM_INF) {
			/* XFRM stack doesn't prevent such configuration :(. */
			NL_SET_ERR_MSG_MOD(extack, "Hard byte limit must be greater than soft one");
			return -EINVAL;
		}

		if (!x->lft.soft_byte_limit || !x->lft.hard_byte_limit) {
			NL_SET_ERR_MSG_MOD(extack, "Soft/hard byte limits can't be 0");
			return -EINVAL;
		}

		if (x->lft.soft_packet_limit >= x->lft.hard_packet_limit &&
		    x->lft.hard_packet_limit != XFRM_INF) {
			/* XFRM stack doesn't prevent such configuration :(. */
			NL_SET_ERR_MSG_MOD(extack, "Hard packet limit must be greater than soft one");
			return -EINVAL;
		}

		if (!x->lft.soft_packet_limit || !x->lft.hard_packet_limit) {
			NL_SET_ERR_MSG_MOD(extack, "Soft/hard packet limits can't be 0");
			return -EINVAL;
		}
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack, "Unsupported xfrm offload type");
		return -EINVAL;
	}
	return 0;
}

static void mlx5e_ipsec_modify_state(struct work_struct *_work)
{
	struct mlx5e_ipsec_work *work =
		container_of(_work, struct mlx5e_ipsec_work, work);
	struct mlx5e_ipsec_sa_entry *sa_entry = work->sa_entry;
	struct mlx5_accel_esp_xfrm_attrs *attrs;

	attrs = &((struct mlx5e_ipsec_sa_entry *)work->data)->attrs;

	mlx5_accel_esp_modify_xfrm(sa_entry, attrs);
}

static void mlx5e_ipsec_set_esn_ops(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct xfrm_state *x = sa_entry->x;

	if (x->xso.type != XFRM_DEV_OFFLOAD_CRYPTO ||
	    x->xso.dir != XFRM_DEV_OFFLOAD_OUT)
		return;

	if (x->props.flags & XFRM_STATE_ESN) {
		sa_entry->set_iv_op = mlx5e_ipsec_set_iv_esn;
		return;
	}

	sa_entry->set_iv_op = mlx5e_ipsec_set_iv;
}

static void mlx5e_ipsec_handle_netdev_event(struct work_struct *_work)
{
	struct mlx5e_ipsec_work *work =
		container_of(_work, struct mlx5e_ipsec_work, work);
	struct mlx5e_ipsec_sa_entry *sa_entry = work->sa_entry;
	struct mlx5e_ipsec_netevent_data *data = work->data;
	struct mlx5_accel_esp_xfrm_attrs *attrs;

	attrs = &sa_entry->attrs;

	switch (attrs->dir) {
	case XFRM_DEV_OFFLOAD_IN:
		ether_addr_copy(attrs->smac, data->addr);
		break;
	case XFRM_DEV_OFFLOAD_OUT:
		ether_addr_copy(attrs->dmac, data->addr);
		break;
	default:
		WARN_ON_ONCE(true);
	}
	attrs->drop = false;
	mlx5e_accel_ipsec_fs_modify(sa_entry);
}

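/* Allocate the deferred-work context needed by this SA, if any: ESN crypto
 * states get a shadow sa_entry so mlx5e_ipsec_modify_state() can push
 * refreshed attrs to the device, while tunnel-mode packet states get a
 * buffer for the MAC address delivered by the netevent notifier.
 */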
static int mlx5_ipsec_create_work(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct xfrm_state *x = sa_entry->x;
	struct mlx5e_ipsec_work *work;
	void *data = NULL;

	switch (x->xso.type) {
	case XFRM_DEV_OFFLOAD_CRYPTO:
		if (!(x->props.flags & XFRM_STATE_ESN))
			return 0;
		break;
	case XFRM_DEV_OFFLOAD_PACKET:
		if (x->props.mode != XFRM_MODE_TUNNEL)
			return 0;
		break;
	default:
		break;
	}

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	switch (x->xso.type) {
	case XFRM_DEV_OFFLOAD_CRYPTO:
		data = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
		if (!data)
			goto free_work;

		INIT_WORK(&work->work, mlx5e_ipsec_modify_state);
		break;
	case XFRM_DEV_OFFLOAD_PACKET:
		data = kzalloc(sizeof(struct mlx5e_ipsec_netevent_data),
			       GFP_KERNEL);
		if (!data)
			goto free_work;

		INIT_WORK(&work->work, mlx5e_ipsec_handle_netdev_event);
		break;
	default:
		break;
	}

	work->data = data;
	work->sa_entry = sa_entry;
	sa_entry->work = work;
	return 0;

free_work:
	kfree(work);
	return -ENOMEM;
}

static int mlx5e_ipsec_create_dwork(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct xfrm_state *x = sa_entry->x;
	struct mlx5e_ipsec_dwork *dwork;

	if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
		return 0;

	if (x->lft.soft_packet_limit == XFRM_INF &&
	    x->lft.hard_packet_limit == XFRM_INF &&
	    x->lft.soft_byte_limit == XFRM_INF &&
	    x->lft.hard_byte_limit == XFRM_INF)
		return 0;

	dwork = kzalloc(sizeof(*dwork), GFP_KERNEL);
	if (!dwork)
		return -ENOMEM;

	dwork->sa_entry = sa_entry;
	INIT_DELAYED_WORK(&dwork->dwork, mlx5e_ipsec_handle_sw_limits);
	sa_entry->dwork = dwork;
	return 0;
}

static int mlx5e_xfrm_add_state(struct xfrm_state *x,
				struct netlink_ext_ack *extack)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
	struct net_device *netdev = x->xso.real_dev;
	struct mlx5e_ipsec *ipsec;
	struct mlx5e_priv *priv;
	gfp_t gfp;
	int err;

	priv = netdev_priv(netdev);
	if (!priv->ipsec)
		return -EOPNOTSUPP;

	ipsec = priv->ipsec;
	gfp = (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ) ? GFP_ATOMIC : GFP_KERNEL;
	sa_entry = kzalloc(sizeof(*sa_entry), gfp);
	if (!sa_entry)
		return -ENOMEM;

	sa_entry->x = x;
	sa_entry->ipsec = ipsec;
	/* Check if this SA is originated from acquire flow temporary SA */
	if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
		goto out;

	err = mlx5e_xfrm_validate_state(priv->mdev, x, extack);
	if (err)
		goto err_xfrm;

	if (!mlx5_eswitch_block_ipsec(priv->mdev)) {
		err = -EBUSY;
		goto err_xfrm;
	}

	/* check esn */
	if (x->props.flags & XFRM_STATE_ESN)
		mlx5e_ipsec_update_esn_state(sa_entry);

	mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &sa_entry->attrs);

	err = mlx5_ipsec_create_work(sa_entry);
	if (err)
		goto unblock_ipsec;

	err = mlx5e_ipsec_create_dwork(sa_entry);
	if (err)
		goto release_work;

	/* create hw context */
	err = mlx5_ipsec_create_sa_ctx(sa_entry);
	if (err)
		goto release_dwork;

	err = mlx5e_accel_ipsec_fs_add_rule(sa_entry);
	if (err)
		goto err_hw_ctx;

	if (x->props.mode == XFRM_MODE_TUNNEL &&
	    x->xso.type == XFRM_DEV_OFFLOAD_PACKET &&
	    !mlx5e_ipsec_fs_tunnel_enabled(sa_entry)) {
		NL_SET_ERR_MSG_MOD(extack, "Packet offload tunnel mode is disabled due to encap settings");
		err = -EINVAL;
		goto err_add_rule;
	}

	/* We use *_bh() variant because xfrm_timer_handler(), which runs
	 * in softirq context, can reach our state delete logic and we need
	 * xa_erase_bh() there.
	 */
	err = xa_insert_bh(&ipsec->sadb, sa_entry->ipsec_obj_id, sa_entry,
			   GFP_KERNEL);
	if (err)
		goto err_add_rule;

	mlx5e_ipsec_set_esn_ops(sa_entry);

	if (sa_entry->dwork)
		queue_delayed_work(ipsec->wq, &sa_entry->dwork->dwork,
				   MLX5_IPSEC_RESCHED);

	if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET &&
	    x->props.mode == XFRM_MODE_TUNNEL)
		xa_set_mark(&ipsec->sadb, sa_entry->ipsec_obj_id,
			    MLX5E_IPSEC_TUNNEL_SA);

out:
	x->xso.offload_handle = (unsigned long)sa_entry;
	return 0;

err_add_rule:
	mlx5e_accel_ipsec_fs_del_rule(sa_entry);
err_hw_ctx:
	mlx5_ipsec_free_sa_ctx(sa_entry);
release_dwork:
	kfree(sa_entry->dwork);
release_work:
	if (sa_entry->work)
		kfree(sa_entry->work->data);
	kfree(sa_entry->work);
unblock_ipsec:
	mlx5_eswitch_unblock_ipsec(priv->mdev);
err_xfrm:
	kfree(sa_entry);
	NL_SET_ERR_MSG_WEAK_MOD(extack, "Device failed to offload this state");
	return err;
}

static void mlx5e_xfrm_del_state(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5e_ipsec_sa_entry *old;

	if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
		return;

	old = xa_erase_bh(&ipsec->sadb, sa_entry->ipsec_obj_id);
	WARN_ON(old != sa_entry);

	if (attrs->mode == XFRM_MODE_TUNNEL &&
	    attrs->type == XFRM_DEV_OFFLOAD_PACKET)
		/* Make sure that no ARP requests are running in parallel */
		flush_workqueue(ipsec->wq);
}

static void mlx5e_xfrm_free_state(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;

	if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
		goto sa_entry_free;

	if (sa_entry->work)
		cancel_work_sync(&sa_entry->work->work);

	if (sa_entry->dwork)
		cancel_delayed_work_sync(&sa_entry->dwork->dwork);

	mlx5e_accel_ipsec_fs_del_rule(sa_entry);
	mlx5_ipsec_free_sa_ctx(sa_entry);
	kfree(sa_entry->dwork);
	if (sa_entry->work)
		kfree(sa_entry->work->data);
	kfree(sa_entry->work);
	mlx5_eswitch_unblock_ipsec(ipsec->mdev);
sa_entry_free:
	kfree(sa_entry);
}

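/* Neighbour update notifier used for tunnel-mode packet offload: when a
 * neighbour matching a tunnel SA's source or destination address becomes
 * valid, snapshot its MAC and queue mlx5e_ipsec_handle_netdev_event() to
 * patch the SA's smac/dmac and clear the drop flag.
 */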
static int mlx5e_ipsec_netevent_event(struct notifier_block *nb,
				      unsigned long event, void *ptr)
{
	struct mlx5_accel_esp_xfrm_attrs *attrs;
	struct mlx5e_ipsec_netevent_data *data;
	struct mlx5e_ipsec_sa_entry *sa_entry;
	struct mlx5e_ipsec *ipsec;
	struct neighbour *n = ptr;
	struct net_device *netdev;
	struct xfrm_state *x;
	unsigned long idx;

	if (event != NETEVENT_NEIGH_UPDATE || !(n->nud_state & NUD_VALID))
		return NOTIFY_DONE;

	ipsec = container_of(nb, struct mlx5e_ipsec, netevent_nb);
	xa_for_each_marked(&ipsec->sadb, idx, sa_entry, MLX5E_IPSEC_TUNNEL_SA) {
		attrs = &sa_entry->attrs;

		if (attrs->family == AF_INET) {
			if (!neigh_key_eq32(n, &attrs->saddr.a4) &&
			    !neigh_key_eq32(n, &attrs->daddr.a4))
				continue;
		} else {
			if (!neigh_key_eq128(n, &attrs->saddr.a4) &&
			    !neigh_key_eq128(n, &attrs->daddr.a4))
				continue;
		}

		x = sa_entry->x;
		netdev = x->xso.real_dev;
		data = sa_entry->work->data;

		neigh_ha_snapshot(data->addr, n, netdev);
		queue_work(ipsec->wq, &sa_entry->work->work);
	}

	return NOTIFY_DONE;
}

void mlx5e_ipsec_init(struct mlx5e_priv *priv)
{
	struct mlx5e_ipsec *ipsec;
	int ret = -ENOMEM;

	if (!mlx5_ipsec_device_caps(priv->mdev)) {
		netdev_dbg(priv->netdev, "Not an IPSec offload device\n");
		return;
	}

	ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
	if (!ipsec)
		return;

	xa_init_flags(&ipsec->sadb, XA_FLAGS_ALLOC);
	ipsec->mdev = priv->mdev;
	init_completion(&ipsec->comp);
	ipsec->wq = alloc_workqueue("mlx5e_ipsec: %s", WQ_UNBOUND, 0,
				    priv->netdev->name);
	if (!ipsec->wq)
		goto err_wq;

	if (mlx5_ipsec_device_caps(priv->mdev) &
	    MLX5_IPSEC_CAP_PACKET_OFFLOAD) {
		ret = mlx5e_ipsec_aso_init(ipsec);
		if (ret)
			goto err_aso;
	}

	if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_TUNNEL) {
		ipsec->netevent_nb.notifier_call = mlx5e_ipsec_netevent_event;
		ret = register_netevent_notifier(&ipsec->netevent_nb);
		if (ret)
			goto clear_aso;
	}

	ipsec->is_uplink_rep = mlx5e_is_uplink_rep(priv);
	ret = mlx5e_accel_ipsec_fs_init(ipsec, &priv->devcom);
	if (ret)
		goto err_fs_init;

	ipsec->fs = priv->fs;
	priv->ipsec = ipsec;
	netdev_dbg(priv->netdev, "IPSec attached to netdevice\n");
	return;

err_fs_init:
	if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_TUNNEL)
		unregister_netevent_notifier(&ipsec->netevent_nb);
clear_aso:
	if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
		mlx5e_ipsec_aso_cleanup(ipsec);
err_aso:
	destroy_workqueue(ipsec->wq);
err_wq:
	kfree(ipsec);
	mlx5_core_err(priv->mdev, "IPSec initialization failed, %d\n", ret);
	return;
}

void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_ipsec *ipsec = priv->ipsec;

	if (!ipsec)
		return;

	mlx5e_accel_ipsec_fs_cleanup(ipsec);
	if (ipsec->netevent_nb.notifier_call) {
		unregister_netevent_notifier(&ipsec->netevent_nb);
		ipsec->netevent_nb.notifier_call = NULL;
	}
	if (ipsec->aso)
		mlx5e_ipsec_aso_cleanup(ipsec);
	destroy_workqueue(ipsec->wq);
	kfree(ipsec);
	priv->ipsec = NULL;
}

static bool mlx5e_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	if (x->props.family == AF_INET) {
		/* Offload with IPv4 options is not supported yet */
		if (ip_hdr(skb)->ihl > 5)
			return false;
	} else {
		/* Offload with IPv6 extension headers is not supported yet */
		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
			return false;
	}

	return true;
}

static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
	struct mlx5e_ipsec_work *work = sa_entry->work;
	struct mlx5e_ipsec_sa_entry *sa_entry_shadow;
	bool need_update;

	need_update = mlx5e_ipsec_update_esn_state(sa_entry);
	if (!need_update)
		return;

	sa_entry_shadow = work->data;
	memset(sa_entry_shadow, 0x00, sizeof(*sa_entry_shadow));
	mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &sa_entry_shadow->attrs);
	queue_work(sa_entry->ipsec->wq, &work->work);
}

static void mlx5e_xfrm_update_stats(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
	struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
	struct net *net = dev_net(x->xso.dev);
	u64 trailer_packets = 0, trailer_bytes = 0;
	u64 replay_packets = 0, replay_bytes = 0;
	u64 auth_packets = 0, auth_bytes = 0;
	u64 success_packets, success_bytes;
	u64 packets, bytes, lastuse;
	size_t headers;

	lockdep_assert(lockdep_is_held(&x->lock) ||
		       lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_cfg_mutex) ||
		       lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_state_lock));

	if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
		return;

	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
		mlx5_fc_query_cached(ipsec_rule->auth.fc, &auth_bytes,
				     &auth_packets, &lastuse);
		x->stats.integrity_failed += auth_packets;
		XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR, auth_packets);

		mlx5_fc_query_cached(ipsec_rule->trailer.fc, &trailer_bytes,
				     &trailer_packets, &lastuse);
		XFRM_ADD_STATS(net, LINUX_MIB_XFRMINHDRERROR, trailer_packets);
	}

	if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
		return;

	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
		mlx5_fc_query_cached(ipsec_rule->replay.fc, &replay_bytes,
				     &replay_packets, &lastuse);
		x->stats.replay += replay_packets;
		XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR, replay_packets);
	}

	mlx5_fc_query_cached(ipsec_rule->fc, &bytes, &packets, &lastuse);
	success_packets = packets - auth_packets - trailer_packets - replay_packets;
	x->curlft.packets += success_packets;
	/* The NIC counts all bytes passed through flow steering and doesn't
	 * have the ability to count the payload size, which is what the SA
	 * accounting needs.
	 *
	 * To overcome this HW limitation, approximate the payload size by
	 * removing the always-present headers.
	 */
	headers = sizeof(struct ethhdr);
	if (sa_entry->attrs.family == AF_INET)
		headers += sizeof(struct iphdr);
	else
		headers += sizeof(struct ipv6hdr);

	success_bytes = bytes - auth_bytes - trailer_bytes - replay_bytes;
	x->curlft.bytes += success_bytes - headers * success_packets;
}

static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev,
				      struct xfrm_policy *x,
				      struct netlink_ext_ack *extack)
{
	struct xfrm_selector *sel = &x->selector;

	if (x->type != XFRM_POLICY_TYPE_MAIN) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload non-main policy types");
		return -EINVAL;
	}

	/* Please pay attention that we support only one template */
	if (x->xfrm_nr > 1) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload more than one template");
		return -EINVAL;
	}

	if (x->xdo.dir != XFRM_DEV_OFFLOAD_IN &&
	    x->xdo.dir != XFRM_DEV_OFFLOAD_OUT) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload forward policy");
		return -EINVAL;
	}

	if (!x->xfrm_vec[0].reqid && sel->proto == IPPROTO_IP &&
	    addr6_all_zero(sel->saddr.a6) && addr6_all_zero(sel->daddr.a6)) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported policy with reqid 0 without at least one of upper protocol or ip addr(s) different than 0");
		return -EINVAL;
	}

	if (x->xdo.type != XFRM_DEV_OFFLOAD_PACKET) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported xfrm offload type");
		return -EINVAL;
	}

	if (x->selector.proto != IPPROTO_IP &&
	    x->selector.proto != IPPROTO_UDP &&
	    x->selector.proto != IPPROTO_TCP) {
		NL_SET_ERR_MSG_MOD(extack, "Device does not support upper protocol other than TCP/UDP");
		return -EINVAL;
	}

	if (x->priority) {
		if (!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO)) {
			NL_SET_ERR_MSG_MOD(extack, "Device does not support policy priority");
			return -EINVAL;
		}

		if (x->priority == U32_MAX) {
			NL_SET_ERR_MSG_MOD(extack, "Device does not support requested policy priority");
			return -EINVAL;
		}
	}

	if (x->xdo.type == XFRM_DEV_OFFLOAD_PACKET &&
	    !(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)) {
		NL_SET_ERR_MSG_MOD(extack, "Packet offload is not supported");
		return -EINVAL;
	}

	return 0;
}

static void
mlx5e_ipsec_build_accel_pol_attrs(struct mlx5e_ipsec_pol_entry *pol_entry,
				  struct mlx5_accel_pol_xfrm_attrs *attrs)
{
	struct xfrm_policy *x = pol_entry->x;
	struct xfrm_selector *sel;

	sel = &x->selector;
	memset(attrs, 0, sizeof(*attrs));

	memcpy(&attrs->saddr, sel->saddr.a6, sizeof(attrs->saddr));
	memcpy(&attrs->daddr, sel->daddr.a6, sizeof(attrs->daddr));
	attrs->family = sel->family;
	attrs->dir = x->xdo.dir;
	attrs->action = x->action;
	attrs->type = XFRM_DEV_OFFLOAD_PACKET;
	attrs->reqid = x->xfrm_vec[0].reqid;
	attrs->upspec.dport = ntohs(sel->dport);
	attrs->upspec.dport_mask = ntohs(sel->dport_mask);
	attrs->upspec.sport = ntohs(sel->sport);
	attrs->upspec.sport_mask = ntohs(sel->sport_mask);
	attrs->upspec.proto = sel->proto;
	attrs->prio = x->priority;
}

static int mlx5e_xfrm_add_policy(struct xfrm_policy *x,
				 struct netlink_ext_ack *extack)
{
	struct net_device *netdev = x->xdo.real_dev;
	struct mlx5e_ipsec_pol_entry *pol_entry;
	struct mlx5e_priv *priv;
	int err;

	priv = netdev_priv(netdev);
	if (!priv->ipsec) {
		NL_SET_ERR_MSG_MOD(extack, "Device doesn't support IPsec packet offload");
		return -EOPNOTSUPP;
	}

	err = mlx5e_xfrm_validate_policy(priv->mdev, x, extack);
	if (err)
		return err;

	pol_entry = kzalloc(sizeof(*pol_entry), GFP_KERNEL);
	if (!pol_entry)
		return -ENOMEM;

	pol_entry->x = x;
	pol_entry->ipsec = priv->ipsec;

	if (!mlx5_eswitch_block_ipsec(priv->mdev)) {
		err = -EBUSY;
		goto ipsec_busy;
	}

	mlx5e_ipsec_build_accel_pol_attrs(pol_entry, &pol_entry->attrs);
	err = mlx5e_accel_ipsec_fs_add_pol(pol_entry);
	if (err)
		goto err_fs;

	x->xdo.offload_handle = (unsigned long)pol_entry;
	return 0;

err_fs:
	mlx5_eswitch_unblock_ipsec(priv->mdev);
ipsec_busy:
	kfree(pol_entry);
	NL_SET_ERR_MSG_MOD(extack, "Device failed to offload this policy");
	return err;
}

static void mlx5e_xfrm_del_policy(struct xfrm_policy *x)
{
	struct mlx5e_ipsec_pol_entry *pol_entry = to_ipsec_pol_entry(x);

	mlx5e_accel_ipsec_fs_del_pol(pol_entry);
	mlx5_eswitch_unblock_ipsec(pol_entry->ipsec->mdev);
}

static void mlx5e_xfrm_free_policy(struct xfrm_policy *x)
{
	struct mlx5e_ipsec_pol_entry *pol_entry = to_ipsec_pol_entry(x);

	kfree(pol_entry);
}

static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
	.xdo_dev_state_add	= mlx5e_xfrm_add_state,
	.xdo_dev_state_delete	= mlx5e_xfrm_del_state,
	.xdo_dev_state_free	= mlx5e_xfrm_free_state,
	.xdo_dev_offload_ok	= mlx5e_ipsec_offload_ok,
	.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,

	.xdo_dev_state_update_stats = mlx5e_xfrm_update_stats,
	.xdo_dev_policy_add = mlx5e_xfrm_add_policy,
	.xdo_dev_policy_delete = mlx5e_xfrm_del_policy,
	.xdo_dev_policy_free = mlx5e_xfrm_free_policy,
};

void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct net_device *netdev = priv->netdev;

	if (!mlx5_ipsec_device_caps(mdev))
		return;

	mlx5_core_info(mdev, "mlx5e: IPSec ESP acceleration enabled\n");

	netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;
	netdev->features |= NETIF_F_HW_ESP;
	netdev->hw_enc_features |= NETIF_F_HW_ESP;

	if (!MLX5_CAP_ETH(mdev, swp_csum)) {
		mlx5_core_dbg(mdev, "mlx5e: SWP checksum not supported\n");
		return;
	}

	netdev->features |= NETIF_F_HW_ESP_TX_CSUM;
	netdev->hw_enc_features |= NETIF_F_HW_ESP_TX_CSUM;

	if (!MLX5_CAP_ETH(mdev, swp_lso)) {
		mlx5_core_dbg(mdev, "mlx5e: ESP LSO not supported\n");
		return;
	}

	netdev->gso_partial_features |= NETIF_F_GSO_ESP;
	mlx5_core_dbg(mdev, "mlx5e: ESP GSO capability turned on\n");
	netdev->features |= NETIF_F_GSO_ESP;
	netdev->hw_features |= NETIF_F_GSO_ESP;
	netdev->hw_enc_features |= NETIF_F_GSO_ESP;
}