/*
 * Copyright (c) 2017-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: qdf_crypto.c
 *
 * This source file contains Linux-specific definitions for the QDF
 * crypto APIs
 */

/* Include Files */
#include "qdf_crypto.h"
#include <linux/export.h>
#include <crypto/hash.h>
#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <crypto/aead.h>
#include <linux/ieee80211.h>
#include <qdf_module.h>

/* Function Definitions and Documentation */
#define MAX_HMAC_ELEMENT_CNT 10

/*
 * xor() - XOR two equal-length byte arrays in place
 * @a: destination array; on return holds a XOR b
 * @b: source array
 * @len: length of both arrays in bytes
 */
static void xor(uint8_t *a, const uint8_t *b, size_t len)
{
	unsigned int i;

	for (i = 0; i < len; i++)
		a[i] ^= b[i];
}

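/**
 * qdf_get_hash() - compute an unkeyed hash over a vector of buffers
 * @type: hash algorithm name as understood by the kernel crypto API
 *	(e.g. "sha256")
 * @element_cnt: number of input buffers, at most MAX_HMAC_ELEMENT_CNT
 * @addr: array of pointers to the input buffers
 * @addr_len: array of input buffer lengths
 * @hash: output buffer for the digest
 *
 * Thin wrapper around qdf_get_hmac_hash() with a NULL key, i.e. a plain
 * (unkeyed) hash.
 *
 * Return: 0 on success, negative errno on failure
 */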
int qdf_get_hash(uint8_t *type,
		uint8_t element_cnt, uint8_t *addr[], uint32_t *addr_len,
		int8_t *hash)
{
	return qdf_get_hmac_hash(type, NULL, 0, element_cnt,
				 addr, addr_len, hash);
}

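/**
 * qdf_get_hmac_hash() - compute an HMAC over a vector of buffers
 * @type: hash algorithm name as understood by the kernel crypto API
 *	(e.g. "hmac(sha256)")
 * @key: HMAC key, or NULL for an unkeyed hash
 * @keylen: length of @key in bytes
 * @element_cnt: number of input buffers, at most MAX_HMAC_ELEMENT_CNT
 * @addr: array of pointers to the input buffers
 * @addr_len: array of input buffer lengths
 * @hash: output buffer for the digest
 *
 * The uint32_t lengths are widened into a local size_t array before
 * being handed to qdf_get_keyed_hash().
 *
 * Return: 0 on success, negative errno on failure
 */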
int qdf_get_hmac_hash(uint8_t *type, uint8_t *key,
		uint32_t keylen,
		uint8_t element_cnt, uint8_t *addr[], uint32_t *addr_len,
		int8_t *hash)
{
	int i;
	size_t src_len[MAX_HMAC_ELEMENT_CNT];

	if (element_cnt > MAX_HMAC_ELEMENT_CNT) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  FL("Invalid element count %d"), element_cnt);
		return -EINVAL;
	}

	for (i = 0; i < element_cnt; i++)
		src_len[i] = addr_len[i];

	return qdf_get_keyed_hash(type, key, keylen, (const uint8_t **)addr,
				  src_len, element_cnt, hash);
}

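/**
 * qdf_default_hmac_sha256_kdf() - HMAC-SHA256-based key derivation
 * @secret: the KDF secret (key for the HMAC)
 * @secret_len: length of @secret in bytes
 * @label: NUL-terminated label string; hashed including its terminator
 * @optional_data: context data mixed into every iteration
 * @optional_data_len: length of @optional_data in bytes
 * @key: output buffer for the derived key
 * @keylen: number of bytes to derive, at most
 *	WLAN_MAX_PRF_INTERATIONS_COUNT * SHA256_DIGEST_SIZE
 *
 * Iterated PRF construction: T1 = HMAC(secret, label || data || 0x01),
 * Tn = HMAC(secret, T(n-1) || label || data || n). The derived key is
 * the concatenation of the Tn blocks, truncated to @keylen.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE otherwise
 */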
QDF_STATUS
qdf_default_hmac_sha256_kdf(uint8_t *secret, uint32_t secret_len,
			    uint8_t *label, uint8_t *optional_data,
			    uint32_t optional_data_len, uint8_t *key,
			    uint32_t keylen)
{
	uint8_t tmp_hash[SHA256_DIGEST_SIZE] = {0};
	uint8_t count = 1;
	uint8_t *addr[4];
	uint32_t len[4];
	uint32_t current_position = 0, remaining_data = SHA256_DIGEST_SIZE;

	addr[0] = tmp_hash;
	len[0] = SHA256_DIGEST_SIZE;
	addr[1] = label;
	len[1] = strlen(label) + 1;
	addr[2] = optional_data;
	len[2] = optional_data_len;
	addr[3] = &count;
	len[3] = 1;

	if (keylen == 0 ||
	    (keylen > (WLAN_MAX_PRF_INTERATIONS_COUNT * SHA256_DIGEST_SIZE))) {
		qdf_err("invalid key length %u", keylen);
		return QDF_STATUS_E_FAILURE;
	}

	/* Create T1 */
	if (qdf_get_hmac_hash(HMAC_SHA256_CRYPTO_TYPE, secret, secret_len, 3,
			      &addr[1], &len[1], tmp_hash) < 0) {
		qdf_err("failed to get hmac hash");
		return QDF_STATUS_E_FAILURE;
	}

	/* Update key from tmp_hash, without overrunning a short @key */
	if (remaining_data > keylen)
		remaining_data = keylen;
	qdf_mem_copy(key + current_position, tmp_hash, remaining_data);
	current_position += remaining_data;

	for (count = 2; current_position < keylen; count++) {
		remaining_data = keylen - current_position;
		if (remaining_data > SHA256_DIGEST_SIZE)
			remaining_data = SHA256_DIGEST_SIZE;

		/* Create T-n */
		if (qdf_get_hmac_hash(HMAC_SHA256_CRYPTO_TYPE, secret,
				      secret_len, 4, addr, len, tmp_hash) < 0) {
			qdf_err("failed to get hmac hash");
			return QDF_STATUS_E_FAILURE;
		}
		/* Update key from tmp_hash */
		qdf_mem_copy(key + current_position, tmp_hash, remaining_data);
		current_position += remaining_data;
	}

	return QDF_STATUS_SUCCESS;
}

/* qdf_update_dbl from RFC 5297. Length of d is AES_BLOCK_SIZE (128 bits) */
void qdf_update_dbl(uint8_t *d)
{
	int i;
	uint8_t msb, msb_prev = 0;

	/* left shift by 1 */
	for (i = AES_BLOCK_SIZE - 1; i >= 0; i--) {
		msb = d[i] & 0x80;
		d[i] = d[i] << 1;
		d[i] += msb_prev ? 1 : 0;
		msb_prev = msb;
	}

	if (msb)
		d[AES_BLOCK_SIZE - 1] ^= 0x87;
}

static inline void xor_128(const uint8_t *a, const uint8_t *b, uint8_t *out)
{
	uint8_t i;

	for (i = 0; i < AES_BLOCK_SIZE; i++)
		out[i] = a[i] ^ b[i];
}

static inline void leftshift_onebit(const uint8_t *input, uint8_t *output)
{
	int i, overflow = 0;

	for (i = (AES_BLOCK_SIZE - 1); i >= 0; i--) {
		output[i] = input[i] << 1;
		output[i] |= overflow;
		overflow = (input[i] & 0x80) ? 1 : 0;
	}
}

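/*
 * generate_subkey() - derive the CMAC subkeys K1 and K2 from the AES key
 * per NIST SP 800-38B / RFC 4493: L = AES-128(K, 0^128); K1 = L << 1,
 * XORed with the constant Rb (0x87 in the low byte) when the MSB of L is
 * set; K2 is derived from K1 the same way.
 */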
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
static void
generate_subkey(struct crypto_aes_ctx *aes_ctx, uint8_t *k1, uint8_t *k2)
{
	uint8_t l[AES_BLOCK_SIZE], tmp[AES_BLOCK_SIZE];
	const uint8_t const_rb[AES_BLOCK_SIZE] = {
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x87
	};
	const uint8_t const_zero[AES_BLOCK_SIZE] = {
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
	};

	aes_encrypt(aes_ctx, l, const_zero);

	if ((l[0] & 0x80) == 0) {       /* If MSB(l) = 0, then k1 = l << 1 */
		leftshift_onebit(l, k1);
	} else {                /* Else k1 = ( l << 1 ) (+) Rb */
		leftshift_onebit(l, tmp);
		xor_128(tmp, const_rb, k1);
	}

	if ((k1[0] & 0x80) == 0) {
		leftshift_onebit(k1, k2);
	} else {
		leftshift_onebit(k1, tmp);
		xor_128(tmp, const_rb, k2);
	}
}
#else
static void
generate_subkey(struct crypto_cipher *tfm, uint8_t *k1, uint8_t *k2)
{
	uint8_t l[AES_BLOCK_SIZE], tmp[AES_BLOCK_SIZE];
	const uint8_t const_rb[AES_BLOCK_SIZE] = {
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x87
	};
	const uint8_t const_zero[AES_BLOCK_SIZE] = {
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
	};

	crypto_cipher_encrypt_one(tfm, l, const_zero);

	if ((l[0] & 0x80) == 0) {       /* If MSB(l) = 0, then k1 = l << 1 */
		leftshift_onebit(l, k1);
	} else {                /* Else k1 = ( l << 1 ) (+) Rb */
		leftshift_onebit(l, tmp);
		xor_128(tmp, const_rb, k1);
	}

	if ((k1[0] & 0x80) == 0) {
		leftshift_onebit(k1, k2);
	} else {
		leftshift_onebit(k1, tmp);
		xor_128(tmp, const_rb, k2);
	}
}
#endif

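/*
 * padding() - build the padded final CMAC block: copy the @length valid
 * bytes of @lastb, append the 0x80 delimiter, and zero-fill the rest of
 * the AES block (10^i padding from NIST SP 800-38B).
 */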
static inline void padding(const uint8_t *lastb, uint8_t *pad, uint16_t length)
{
	uint8_t j;

	/* original last block */
	for (j = 0; j < AES_BLOCK_SIZE; j++) {
		if (j < length)
			pad[j] = lastb[j];
		else if (j == length)
			pad[j] = 0x80;
		else
			pad[j] = 0x00;
	}
}

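/**
 * qdf_crypto_aes_128_cmac() - compute an AES-128-CMAC MIC
 * @key: 16-byte AES key
 * @data: message to authenticate
 * @len: length of @data in bytes
 * @mic: output buffer; receives the CMAC_TLEN-byte truncated tag
 *
 * Open-coded AES-CMAC per RFC 4493: the message is processed in CBC-MAC
 * fashion, with the final block XORed with subkey K1 (complete block) or
 * padded and XORed with K2 (partial block). On kernels >= 5.4 the AES
 * library interface (aes_expandkey/aes_encrypt) is used; older kernels
 * go through a crypto_cipher transform.
 *
 * Return: 0 on success, negative errno on failure
 */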
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
int qdf_crypto_aes_128_cmac(const uint8_t *key, const uint8_t *data,
			    uint16_t len, uint8_t *mic)
{
	uint8_t x[AES_BLOCK_SIZE], y[AES_BLOCK_SIZE];
	uint8_t m_last[AES_BLOCK_SIZE], padded[AES_BLOCK_SIZE];
	uint8_t k1[AES_KEYSIZE_128], k2[AES_KEYSIZE_128];
	int cmp_blk;
	int i, num_block = (len + AES_BLOCK_SIZE - 1) / AES_BLOCK_SIZE;
	struct crypto_aes_ctx aes_ctx;
	int ret;

	/* Calculate MIC and then copy */
	ret = aes_expandkey(&aes_ctx, key, AES_KEYSIZE_128);
	if (ret) {
		qdf_err("aes_expandkey failed (%d)", ret);
		return ret;
	}

	generate_subkey(&aes_ctx, k1, k2);

	if (num_block == 0) {
		num_block = 1;
		cmp_blk = 0;
	} else {
		cmp_blk = ((len % AES_BLOCK_SIZE) == 0) ? 1 : 0;
	}

	if (cmp_blk) {
		/* Last block is complete block */
		xor_128(&data[AES_BLOCK_SIZE * (num_block - 1)], k1, m_last);
	} else {
		/* Last block is not complete block */
		padding(&data[AES_BLOCK_SIZE * (num_block - 1)], padded,
			len % AES_BLOCK_SIZE);
		xor_128(padded, k2, m_last);
	}

	for (i = 0; i < AES_BLOCK_SIZE; i++)
		x[i] = 0;

	for (i = 0; i < (num_block - 1); i++) {
		/* y = Mi (+) x */
		xor_128(x, &data[AES_BLOCK_SIZE * i], y);
		/* x = AES-128(KEY, y) */
		aes_encrypt(&aes_ctx, x, y);
	}

	xor_128(x, m_last, y);
	aes_encrypt(&aes_ctx, x, y);
	memzero_explicit(&aes_ctx, sizeof(aes_ctx));

	memcpy(mic, x, CMAC_TLEN);

	return 0;
}
#else
int qdf_crypto_aes_128_cmac(const uint8_t *key, const uint8_t *data,
			    uint16_t len, uint8_t *mic)
{
	uint8_t x[AES_BLOCK_SIZE], y[AES_BLOCK_SIZE];
	uint8_t m_last[AES_BLOCK_SIZE], padded[AES_BLOCK_SIZE];
	uint8_t k1[AES_KEYSIZE_128], k2[AES_KEYSIZE_128];
	int cmp_blk;
	int i, num_block = (len + AES_BLOCK_SIZE - 1) / AES_BLOCK_SIZE;
	struct crypto_cipher *tfm;
	int ret;

	/* Calculate MIC and then copy */
	tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		ret = PTR_ERR(tfm);
		qdf_err("crypto_alloc_cipher failed (%d)", ret);
		return ret;
	}

	ret = crypto_cipher_setkey(tfm, key, AES_KEYSIZE_128);
	if (ret) {
		qdf_err("crypto_cipher_setkey failed (%d)", ret);
		crypto_free_cipher(tfm);
		return ret;
	}

	generate_subkey(tfm, k1, k2);

	if (num_block == 0) {
		num_block = 1;
		cmp_blk = 0;
	} else {
		cmp_blk = ((len % AES_BLOCK_SIZE) == 0) ? 1 : 0;
	}

	if (cmp_blk) {
		/* Last block is complete block */
		xor_128(&data[AES_BLOCK_SIZE * (num_block - 1)], k1, m_last);
	} else {
		/* Last block is not complete block */
		padding(&data[AES_BLOCK_SIZE * (num_block - 1)], padded,
			len % AES_BLOCK_SIZE);
		xor_128(padded, k2, m_last);
	}

	for (i = 0; i < AES_BLOCK_SIZE; i++)
		x[i] = 0;

	for (i = 0; i < (num_block - 1); i++) {
		/* y = Mi (+) x */
		xor_128(x, &data[AES_BLOCK_SIZE * i], y);
		/* x = AES-128(KEY, y) */
		crypto_cipher_encrypt_one(tfm, x, y);
	}

	xor_128(x, m_last, y);
	crypto_cipher_encrypt_one(tfm, x, y);

	crypto_free_cipher(tfm);

	memcpy(mic, x, CMAC_TLEN);

	return 0;
}
#endif

/**
 * set_desc_flags() - set flags variable in the shash_desc struct
 * @desc: pointer to shash_desc struct
 * @tfm: pointer to crypto_shash struct
 *
 * Set the flags variable in the shash_desc struct by getting the flag
 * from the crypto_shash struct. The flag was never actually used, so it
 * was removed from the kernel in version 5.2. For kernel 5.2 and above,
 * this function is therefore a no-op.
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0))
static void set_desc_flags(struct shash_desc *desc, struct crypto_shash *tfm)
{
	desc->flags = crypto_shash_get_flags(tfm);
}
#else
static void set_desc_flags(struct shash_desc *desc, struct crypto_shash *tfm)
{
}
#endif

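/**
 * qdf_get_keyed_hash() - compute a (possibly keyed) hash over a vector
 * of buffers using the kernel shash API
 * @alg: algorithm name (e.g. "hmac(sha256)" or "cmac(aes)")
 * @key: key for keyed algorithms, or NULL
 * @key_len: length of @key in bytes; 0 skips the setkey step
 * @src: array of pointers to the input buffers
 * @src_len: array of input buffer lengths
 * @num_elements: number of input buffers
 * @out: output buffer for the digest
 *
 * A minimal caller-side sketch (illustrative names; a 32-byte digest is
 * assumed for "hmac(sha256)"):
 *
 *	const uint8_t *bufs[] = { hdr, payload };
 *	size_t lens[] = { hdr_len, payload_len };
 *	uint8_t digest[32];
 *
 *	ret = qdf_get_keyed_hash("hmac(sha256)", key, key_len,
 *				 bufs, lens, 2, digest);
 *
 * Return: 0 on success, negative errno on failure
 */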
int qdf_get_keyed_hash(const char *alg, const uint8_t *key,
			unsigned int key_len, const uint8_t *src[],
			size_t *src_len, size_t num_elements, uint8_t *out)
{
	struct crypto_shash *tfm;
	int ret;
	size_t i;

	tfm = crypto_alloc_shash(alg, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  FL("Failed to allocate transformation for %s: %ld"),
			  alg, PTR_ERR(tfm));
		return -EINVAL;
	}

	if (key && key_len) {
		ret = crypto_shash_setkey(tfm, key, key_len);
		if (ret) {
			QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
				  FL("Set key failed for %s, ret:%d"),
				  alg, -ret);
			goto error;
		}
	}

	do {
		SHASH_DESC_ON_STACK(desc, tfm);
		desc->tfm = tfm;
		set_desc_flags(desc, tfm);

		ret = crypto_shash_init(desc);
		if (ret) {
			QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
				  FL("Failed to init hash for %s, ret:%d"),
				  alg, -ret);
			goto error;
		}

		for (i = 0; i < num_elements; i++) {
			ret = crypto_shash_update(desc, src[i], src_len[i]);
			if (ret) {
				QDF_TRACE(QDF_MODULE_ID_QDF,
					  QDF_TRACE_LEVEL_ERROR,
					  FL("Failed to update hash for %s, ret:%d"),
					  alg, -ret);
				goto error;
			}
		}

		ret = crypto_shash_final(desc, out);
		if (ret)
			QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
				  FL("Failed to get digest for %s, ret:%d"),
				  alg, -ret);
	} while (0);

error:
	crypto_free_shash(tfm);
	return ret;
}

qdf_export_symbol(qdf_get_keyed_hash);

/**
 * qdf_aes_s2v() - AES S2V (string to vector) operation from RFC 5297
 * @key: key for the AES-CMAC operations
 * @key_len: length of @key in bytes
 * @s: array of pointers to the input strings S1..Sn
 * @s_len: array of input string lengths
 * @num_s: number of input strings
 * @out: output vector V; must be AES_BLOCK_SIZE bytes long
 *
 * Return: 0 on success, negative errno on failure
 */
int qdf_aes_s2v(const uint8_t *key, unsigned int key_len, const uint8_t *s[],
		   size_t s_len[], size_t num_s, uint8_t *out)
{
	const char *alg = "cmac(aes)";
	uint8_t d[AES_BLOCK_SIZE];
	uint8_t buf[AES_BLOCK_SIZE] = { 0 };
	size_t buf_len = AES_BLOCK_SIZE;
	const uint8_t *a[1];
	unsigned int i;
	uint8_t *t = NULL;
	size_t t_len;
	int ret;

	if (num_s == 0) {
		/* V = AES-CMAC(K, <one>) */
		buf[0] = 0x01;
		a[0] = buf;
		ret = qdf_get_keyed_hash(alg, key, key_len, a, &buf_len, 1,
					 out);
		return ret;
	}

	/* D = AES-CMAC(K, <zero>) */
	a[0] = buf;
	ret = qdf_get_keyed_hash(alg, key, key_len, a, &buf_len, 1, d);
	if (ret)
		goto error;

	for (i = 0; i < num_s - 1; i++) {
		/* D = dbl(D) xor AES-CMAC(K, Si) */
		qdf_update_dbl(d);
		ret = qdf_get_keyed_hash(alg, key, key_len, &s[i], &s_len[i], 1,
					 buf);
		if (ret)
			goto error;
		xor(d, buf, AES_BLOCK_SIZE);
	}

	if (s_len[i] >= AES_BLOCK_SIZE) {
		/* len(Sn) >= 128: T = Sn xorend D */
		t = qdf_mem_malloc(s_len[i]);
		if (!t)
			return -EINVAL;
		qdf_mem_copy(t, s[i], s_len[i]);
		xor(t + s_len[i] - AES_BLOCK_SIZE, d, AES_BLOCK_SIZE);
		t_len = s_len[i];
	} else {
		/* len(Sn) < 128: T = dbl(D) xor pad(Sn) */
		qdf_update_dbl(d);
		qdf_mem_zero(buf, AES_BLOCK_SIZE);
		qdf_mem_copy(buf, s[i], s_len[i]);
		buf[s_len[i]] = 0x80;
		/* XOR against the padded Sn in buf, not the raw s[i] */
		xor(d, buf, AES_BLOCK_SIZE);
		t = d;
		t_len = AES_BLOCK_SIZE;
	}

	/* V = AES-CMAC(K, T) */
	a[0] = t;
	ret = qdf_get_keyed_hash(alg, key, key_len, a, &t_len, 1, out);

error:
	if (t && t != d)
		qdf_mem_free(t);
	return ret;
}

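/**
 * qdf_aes_ctr() - AES-CTR encrypt or decrypt a buffer
 * @key: AES key; length validated by IS_VALID_CTR_KEY_LEN()
 * @key_len: length of @key in bytes
 * @siv: 16-byte initial counter block (IV)
 * @src: input buffer
 * @src_len: length of @src in bytes
 * @dest: output buffer, at least @src_len bytes
 * @enc: true to encrypt, false to decrypt (the keystream XOR is
 *	identical in CTR mode)
 *
 * Uses the skcipher API on kernels >= 4.3, the older ablkcipher API on
 * kernels >= 3.18, and returns -EINVAL on anything older.
 *
 * Return: 0 on success, negative errno on failure
 */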
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0))
int qdf_aes_ctr(const uint8_t *key, unsigned int key_len, uint8_t *siv,
		const uint8_t *src, size_t src_len, uint8_t *dest, bool enc)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req = NULL;
	struct scatterlist sg_in, sg_out;
	int ret;

	if (!IS_VALID_CTR_KEY_LEN(key_len)) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  FL("Invalid key length: %u"), key_len);
		return -EINVAL;
	}

	tfm = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  FL("Failed to alloc transformation for ctr(aes):%ld"),
			  PTR_ERR(tfm));
		return -EAGAIN;
	}

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  FL("Failed to allocate request for ctr(aes)"));
		crypto_free_skcipher(tfm);
		return -EAGAIN;
	}

	ret = crypto_skcipher_setkey(tfm, key, key_len);
	if (ret) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  FL("Set key failed for ctr(aes), ret:%d"), -ret);
		skcipher_request_free(req);
		crypto_free_skcipher(tfm);
		return ret;
	}

	sg_init_one(&sg_in, src, src_len);
	sg_init_one(&sg_out, dest, src_len);
	skcipher_request_set_crypt(req, &sg_in, &sg_out, src_len, siv);

	if (enc)
		ret = crypto_skcipher_encrypt(req);
	else
		ret = crypto_skcipher_decrypt(req);

	if (ret) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  FL("%s failed for ctr(aes), ret:%d"),
			  enc ? "Encryption" : "Decryption", -ret);
	}

	skcipher_request_free(req);
	crypto_free_skcipher(tfm);
	return ret;
}
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
int qdf_aes_ctr(const uint8_t *key, unsigned int key_len, uint8_t *siv,
		const uint8_t *src, size_t src_len, uint8_t *dest, bool enc)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req = NULL;
	struct scatterlist sg_in, sg_out;
	int ret;

	if (!IS_VALID_CTR_KEY_LEN(key_len)) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  FL("Invalid key length: %u"), key_len);
		return -EINVAL;
	}

	tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  FL("Failed to alloc transformation for ctr(aes):%ld"),
			  PTR_ERR(tfm));
		return -EAGAIN;
	}

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  FL("Failed to allocate request for ctr(aes)"));
		crypto_free_ablkcipher(tfm);
		return -EAGAIN;
	}

	ret = crypto_ablkcipher_setkey(tfm, key, key_len);
	if (ret) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  FL("Set key failed for ctr(aes), ret:%d"), -ret);
		ablkcipher_request_free(req);
		crypto_free_ablkcipher(tfm);
		return ret;
	}

	sg_init_one(&sg_in, src, src_len);
	sg_init_one(&sg_out, dest, src_len);
	ablkcipher_request_set_crypt(req, &sg_in, &sg_out, src_len, siv);

	if (enc)
		ret = crypto_ablkcipher_encrypt(req);
	else
		ret = crypto_ablkcipher_decrypt(req);

	if (ret) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  FL("%s failed for ctr(aes), ret:%d"),
			  enc ? "Encryption" : "Decryption", -ret);
	}

	ablkcipher_request_free(req);
	crypto_free_ablkcipher(tfm);

	return ret;
}
#else
int qdf_aes_ctr(const uint8_t *key, unsigned int key_len, uint8_t *siv,
		const uint8_t *src, size_t src_len, uint8_t *dest, bool enc)
{
	return -EINVAL;
}
#endif

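/**
 * qdf_crypto_aes_gmac() - compute an AES-GMAC MIC (e.g. for BIP-GMAC
 * management frame protection)
 * @key: AES key
 * @key_length: length of @key in bytes
 * @iv: GCM initialization vector (nonce)
 * @aad: additional authenticated data; AAD_LEN bytes are consumed
 * @data: frame body to authenticate
 * @data_len: length of @data in bytes
 * @mic: output buffer; receives IEEE80211_MMIE_GMAC_MICLEN bytes
 *
 * GMAC is GCM with a zero-length plaintext: the AAD, the frame body, and
 * a zeroed MIC placeholder are all fed through the associated-data
 * scatterlist, so only the authentication tag is produced.
 *
 * Return: 0 on success, negative errno on failure
 */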
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
int qdf_crypto_aes_gmac(const uint8_t *key, uint16_t key_length,
			uint8_t *iv, const uint8_t *aad,
			const uint8_t *data, uint16_t data_len, uint8_t *mic)
{
	struct crypto_aead *tfm;
	int ret = 0;
	struct scatterlist sg[4];
	uint16_t req_size;
	struct aead_request *req = NULL;
	uint8_t *aad_ptr, *input;

	tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		ret = PTR_ERR(tfm);
		tfm = NULL;
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "%s: crypto_alloc_aead failed (%d)", __func__, ret);
		goto err_tfm;
	}

	ret = crypto_aead_setkey(tfm, key, key_length);
	if (ret) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "crypto_aead_setkey failed (%d)", ret);
		goto err_tfm;
	}

	ret = crypto_aead_setauthsize(tfm, IEEE80211_MMIE_GMAC_MICLEN);
	if (ret) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "crypto_aead_setauthsize failed (%d)", ret);
		goto err_tfm;
	}

	/* Prepare aead request */
	req_size = sizeof(*req) + crypto_aead_reqsize(tfm) +
			IEEE80211_MMIE_GMAC_MICLEN + AAD_LEN;
	req = qdf_mem_malloc(req_size);
	if (!req) {
		ret = -ENOMEM;
		goto err_tfm;
	}

	input = (uint8_t *)req + sizeof(*req) + crypto_aead_reqsize(tfm);
	aad_ptr = input + IEEE80211_MMIE_GMAC_MICLEN;
	qdf_mem_copy(aad_ptr, aad, AAD_LEN);

	/* Scatter list operations */
	sg_init_table(sg, 4);
	sg_set_buf(&sg[0], aad_ptr, AAD_LEN);
	sg_set_buf(&sg[1], data, data_len);
	sg_set_buf(&sg[2], input, IEEE80211_MMIE_GMAC_MICLEN);
	sg_set_buf(&sg[3], mic, IEEE80211_MMIE_GMAC_MICLEN);

	aead_request_set_tfm(req, tfm);
	aead_request_set_crypt(req, sg, sg, 0, iv);
	aead_request_set_ad(req,
			    AAD_LEN + data_len + IEEE80211_MMIE_GMAC_MICLEN);
	ret = crypto_aead_encrypt(req);

err_tfm:
	if (tfm)
		crypto_free_aead(tfm);

	if (req)
		qdf_mem_free(req);

	return ret;
}
#else
int qdf_crypto_aes_gmac(const uint8_t *key, uint16_t key_length,
			uint8_t *iv, const uint8_t *aad,
			const uint8_t *data, uint16_t data_len, uint8_t *mic)
{
	return -EINVAL;
}
#endif