xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/qdf_crypto.c (revision a86b23ee68a2491aede2e03991f3fb37046f4e41)
1 /*
2  * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: qdf_crypto.c
21  *
22  * This source file contains linux specific definitions for QDF crypto APIs
23  */
24 
25 /* Include Files */
26 #include "qdf_crypto.h"
27 #include <linux/export.h>
28 #include <crypto/hash.h>
29 #include <crypto/aes.h>
30 #include <crypto/skcipher.h>
31 #include <crypto/aead.h>
32 #include <linux/ieee80211.h>
33 
34 /* Function Definitions and Documentation */
35 #define MAX_HMAC_ELEMENT_CNT 10
36 
/*
 * xor() - byte-wise in-place exclusive OR of two buffers
 * @a: destination buffer, updated to a ^ b
 * @b: source buffer, read only
 * @len: number of bytes to combine
 */
static void xor(uint8_t *a, const uint8_t *b, size_t len)
{
	size_t pos;

	for (pos = 0; pos < len; pos++)
		a[pos] ^= b[pos];
}
50 
51 int qdf_get_hash(uint8_t *type,
52 		uint8_t element_cnt, uint8_t *addr[], uint32_t *addr_len,
53 		int8_t *hash)
54 {
55 	return qdf_get_hmac_hash(type, NULL, 0, element_cnt,
56 				 addr, addr_len, hash);
57 }
58 
59 int qdf_get_hmac_hash(uint8_t *type, uint8_t *key,
60 		uint32_t keylen,
61 		uint8_t element_cnt, uint8_t *addr[], uint32_t *addr_len,
62 		int8_t *hash)
63 {
64 	int i;
65 	size_t src_len[MAX_HMAC_ELEMENT_CNT];
66 
67 	if (element_cnt > MAX_HMAC_ELEMENT_CNT) {
68 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
69 			  FL("Invalid element count %d"), element_cnt);
70 		return -EINVAL;
71 	}
72 
73 	for (i = 0; i < element_cnt; i++)
74 		src_len[i] = addr_len[i];
75 
76 	return qdf_get_keyed_hash(type, key, keylen, (const uint8_t **)addr,
77 				  src_len, element_cnt,  hash);
78 }
79 
/* qdf_update_dbl from RFC 5297. Length of d is AES_BLOCK_SIZE (128 bits) */
void qdf_update_dbl(uint8_t *d)
{
	int idx;
	uint8_t carry = 0;
	uint8_t next_carry;

	/*
	 * Multiply by x in GF(2^128): shift the big-endian 128-bit value
	 * left by one bit, carrying each byte's MSB into the next byte up.
	 */
	for (idx = AES_BLOCK_SIZE - 1; idx >= 0; idx--) {
		next_carry = (d[idx] & 0x80) ? 1 : 0;
		d[idx] = (uint8_t)((d[idx] << 1) | carry);
		carry = next_carry;
	}

	/* If a bit was shifted out, reduce by xoring in Rb (0x87) */
	if (carry)
		d[AES_BLOCK_SIZE - 1] ^= 0x87;
}
97 
/* xor_128() - out = a ^ b over one AES block (16 bytes) */
static inline void xor_128(const uint8_t *a, const uint8_t *b, uint8_t *out)
{
	const uint8_t *end = out + AES_BLOCK_SIZE;

	while (out < end)
		*out++ = *a++ ^ *b++;
}
105 
/*
 * leftshift_onebit() - shift a big-endian 128-bit value left by one bit
 * The MSB shifted out of byte N becomes the LSB of byte N-1.
 */
static inline void leftshift_onebit(const uint8_t *input, uint8_t *output)
{
	int carry = 0;
	int pos;

	for (pos = AES_BLOCK_SIZE - 1; pos >= 0; pos--) {
		int msb = (input[pos] >> 7) & 1;

		output[pos] = (uint8_t)((input[pos] << 1) | carry);
		carry = msb;
	}
}
116 
117 static void generate_subkey(struct crypto_cipher *tfm, uint8_t *k1, uint8_t *k2)
118 {
119 	uint8_t l[AES_BLOCK_SIZE], tmp[AES_BLOCK_SIZE];
120 	const uint8_t const_rb[AES_BLOCK_SIZE] = {
121 		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
122 		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x87
123 	};
124 	const uint8_t const_zero[AES_BLOCK_SIZE] = {
125 		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
126 		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
127 	};
128 
129 	crypto_cipher_encrypt_one(tfm, l, const_zero);
130 
131 	if ((l[0] & 0x80) == 0) {       /* If MSB(l) = 0, then k1 = l << 1 */
132 		leftshift_onebit(l, k1);
133 	} else {                /* Else k1 = ( l << 1 ) (+) Rb */
134 		leftshift_onebit(l, tmp);
135 		xor_128(tmp, const_rb, k1);
136 	}
137 
138 	if ((k1[0] & 0x80) == 0) {
139 		leftshift_onebit(k1, k2);
140 	} else {
141 		leftshift_onebit(k1, tmp);
142 		xor_128(tmp, const_rb, k2);
143 	}
144 }
145 
/*
 * padding() - 10^i pad a partial final block (RFC 4493)
 * @lastb: partial input block, @length bytes valid
 * @pad: output block: input bytes, then 0x80, then zero fill
 * @length: number of valid bytes in @lastb (< AES_BLOCK_SIZE)
 */
static inline void padding(const uint8_t *lastb, uint8_t *pad, uint16_t length)
{
	uint8_t pos;

	for (pos = 0; pos < AES_BLOCK_SIZE; pos++) {
		if (pos < length)
			pad[pos] = lastb[pos];
		else
			pad[pos] = (pos == length) ? 0x80 : 0x00;
	}
}
160 
161 int qdf_crypto_aes_128_cmac(const uint8_t *key, const uint8_t *data,
162 			    uint16_t len, uint8_t *mic)
163 {
164 	uint8_t x[AES_BLOCK_SIZE], y[AES_BLOCK_SIZE];
165 	uint8_t m_last[AES_BLOCK_SIZE], padded[AES_BLOCK_SIZE];
166 	uint8_t k1[AES_KEYSIZE_128], k2[AES_KEYSIZE_128];
167 	int cmp_blk;
168 	int i, num_block = (len + 15) / AES_BLOCK_SIZE;
169 	struct crypto_cipher *tfm;
170 	int ret;
171 
172 	/*
173 	 * Calculate MIC and then copy
174 	 */
175 	tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
176 	if (IS_ERR(tfm)) {
177 		ret = PTR_ERR(tfm);
178 		qdf_err("crypto_alloc_cipher failed (%d)", ret);
179 		return ret;
180 	}
181 
182 	ret = crypto_cipher_setkey(tfm, key, AES_KEYSIZE_128);
183 	if (ret) {
184 		qdf_err("crypto_cipher_setkey failed (%d)", ret);
185 		crypto_free_cipher(tfm);
186 		return ret;
187 	}
188 
189 	generate_subkey(tfm, k1, k2);
190 
191 	if (num_block == 0) {
192 		num_block = 1;
193 		cmp_blk = 0;
194 	} else {
195 		cmp_blk = ((len % AES_BLOCK_SIZE) == 0) ? 1 : 0;
196 	}
197 
198 	if (cmp_blk) {
199 		/* Last block is complete block */
200 		xor_128(&data[AES_BLOCK_SIZE * (num_block - 1)], k1, m_last);
201 	} else {
202 		/* Last block is not complete block */
203 		padding(&data[AES_BLOCK_SIZE * (num_block - 1)], padded,
204 			len % AES_BLOCK_SIZE);
205 		xor_128(padded, k2, m_last);
206 	}
207 
208 	for (i = 0; i < AES_BLOCK_SIZE; i++)
209 		x[i] = 0;
210 
211 	for (i = 0; i < (num_block - 1); i++) {
212 		/* y = Mi (+) x */
213 		xor_128(x, &data[AES_BLOCK_SIZE * i], y);
214 		/* x = AES-128(KEY, y) */
215 		crypto_cipher_encrypt_one(tfm, x, y);
216 	}
217 
218 	xor_128(x, m_last, y);
219 	crypto_cipher_encrypt_one(tfm, x, y);
220 
221 	crypto_free_cipher(tfm);
222 
223 	memcpy(mic, x, CMAC_TLEN);
224 
225 	return 0;
226 }
227 
/**
 * set_desc_flags() - set flags variable in the shash_desc struct
 * @desc: pointer to shash_desc struct
 * @tfm: pointer to crypto_shash struct
 *
 * Set the flags variable in the shash_desc struct by getting the flag
 * from the crypto_hash struct. The flag is not actually used, prompting
 * its removal from kernel code in versions 5.2 and above. Thus, for
 * versions 5.2 and above, do not set the flag variable of shash_desc.
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0))
static void set_desc_flags(struct shash_desc *desc, struct crypto_shash *tfm)
{
	/* Mirror the transform's flag word into the request descriptor */
	desc->flags = crypto_shash_get_flags(tfm);
}
#else
static void set_desc_flags(struct shash_desc *desc, struct crypto_shash *tfm)
{
	/* shash_desc::flags was removed in kernel 5.2; nothing to do */
}
#endif
248 
/**
 * qdf_get_keyed_hash() - compute a (keyed) digest over scattered buffers
 * @alg: crypto algorithm name, e.g. "hmac(sha256)" or "cmac(aes)"
 * @key: key material; pass NULL/0 for unkeyed digests
 * @key_len: length of @key in bytes
 * @src: array of input buffer pointers
 * @src_len: array of per-buffer lengths, parallel to @src
 * @num_elements: number of buffers in @src
 * @out: digest output; caller must size it for @alg's digest length
 *
 * Return: 0 on success, negative error code otherwise
 */
int qdf_get_keyed_hash(const char *alg, const uint8_t *key,
			unsigned int key_len, const uint8_t *src[],
			size_t *src_len, size_t num_elements, uint8_t *out)
{
	struct crypto_shash *tfm;
	int ret;
	size_t i;

	tfm = crypto_alloc_shash(alg, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  FL("Failed to allocate transformation for %s: %ld"),
			  alg, PTR_ERR(tfm));
		return -EINVAL;
	}

	/* Key is optional: unkeyed algorithms skip setkey entirely */
	if (key && key_len) {
		ret = crypto_shash_setkey(tfm, key, key_len);
		if (ret) {
			QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
				  FL("Set key failed for %s, ret:%d"),
				  alg, -ret);
			goto error;
		}
	}

	/* do {} while (0) bounds the lifetime of the on-stack descriptor */
	do {
		SHASH_DESC_ON_STACK(desc, tfm);
		desc->tfm = tfm;
		set_desc_flags(desc, tfm);

		ret = crypto_shash_init(desc);
		if (ret) {
			QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
				  FL("Failed to init hash for %s, ret:%d"),
				  alg, -ret);
			goto error;
		}

		/* Feed each scattered input buffer into the digest */
		for (i = 0; i < num_elements; i++) {
			ret = crypto_shash_update(desc, src[i], src_len[i]);
			if (ret) {
				QDF_TRACE(QDF_MODULE_ID_QDF,
					  QDF_TRACE_LEVEL_ERROR,
					  FL("Failed to update hash for %s, ret:%d"),
					  alg, -ret);
				goto error;
			}
		}

		ret = crypto_shash_final(desc, out);
		if (ret)
			QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
				  FL("Failed to get digest for %s, ret:%d"),
				  alg, -ret);
	} while (0);

error:
	crypto_free_shash(tfm);
	return ret;
}
310 
311 /* AES String to Vector from RFC 5297, 'out' should be of length AES_BLOCK_SIZE
312  */
313 int qdf_aes_s2v(const uint8_t *key, unsigned int key_len, const uint8_t *s[],
314 		   size_t s_len[], size_t num_s, uint8_t *out)
315 {
316 	const char *alg = "cmac(aes)";
317 	uint8_t d[AES_BLOCK_SIZE];
318 	uint8_t buf[AES_BLOCK_SIZE] = { 0 };
319 	size_t buf_len = AES_BLOCK_SIZE;
320 	const uint8_t *a[1];
321 	unsigned int i;
322 	uint8_t *t = NULL;
323 	size_t t_len;
324 	int ret;
325 
326 	if (num_s == 0) {
327 		/* V = AES-CMAC(K, <one>) */
328 		buf[0] = 0x01;
329 		a[0] = buf;
330 		ret = qdf_get_keyed_hash(alg, key, key_len, a, &buf_len, 1,
331 					 out);
332 		return ret;
333 	}
334 
335 	/* D = AES-CMAC(K, <zero>) */
336 	a[0] = buf;
337 	ret = qdf_get_keyed_hash(alg, key, key_len, a, &buf_len, 1, d);
338 	if (ret)
339 		goto error;
340 
341 	for (i = 0; i < num_s - 1; i++) {
342 		/* D = qdf_update_dbl(D) xor AES-CMAC(K, Si) */
343 		qdf_update_dbl(d);
344 		ret = qdf_get_keyed_hash(alg, key, key_len, &s[i], &s_len[i], 1,
345 					 buf);
346 		if (ret)
347 			goto error;
348 		xor(d, buf, AES_BLOCK_SIZE);
349 	}
350 
351 	if (s_len[i] >= AES_BLOCK_SIZE) {
352 		/* len(Sn) >= 128 */
353 		/* T = Sn xorend D */
354 		t = qdf_mem_malloc(s_len[i]);
355 		if (!t)
356 			return -EINVAL;
357 		qdf_mem_copy(t, s[i], s_len[i]);
358 		xor(t + s_len[i] - AES_BLOCK_SIZE, d, AES_BLOCK_SIZE);
359 		t_len = s_len[i];
360 	} else {
361 		/* len(Sn) < 128 */
362 		/* T = qdf_update_dbl(D) xor pad(Sn) */
363 		qdf_update_dbl(d);
364 		qdf_mem_zero(buf, AES_BLOCK_SIZE);
365 		qdf_mem_copy(buf, s[i], s_len[i]);
366 		buf[s_len[i]] = 0x80;
367 		xor(d, s[i], AES_BLOCK_SIZE);
368 		t = d;
369 		t_len = AES_BLOCK_SIZE;
370 	}
371 
372 	/* V = AES-CMAC(K, T) */
373 	a[0] = t;
374 	ret = qdf_get_keyed_hash(alg, key, key_len, a, &t_len, 1, out);
375 
376 error:
377 	if (t && t != d)
378 		qdf_mem_free(t);
379 	return ret;
380 }
381 
382 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0))
383 int qdf_aes_ctr(const uint8_t *key, unsigned int key_len, uint8_t *siv,
384 		const uint8_t *src, size_t src_len, uint8_t *dest, bool enc)
385 {
386 	struct crypto_skcipher *tfm;
387 	struct skcipher_request *req = NULL;
388 	struct scatterlist sg_in, sg_out;
389 	int ret;
390 
391 	if (!IS_VALID_CTR_KEY_LEN(key_len)) {
392 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
393 			  FL("Invalid key length: %u"), key_len);
394 		return -EINVAL;
395 	}
396 
397 	tfm = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
398 	if (IS_ERR(tfm)) {
399 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
400 			  FL("Failed to alloc transformation for ctr(aes):%ld"),
401 			  PTR_ERR(tfm));
402 		return -EAGAIN;
403 	}
404 
405 	req = skcipher_request_alloc(tfm, GFP_KERNEL);
406 	if (!req) {
407 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
408 			  FL("Failed to allocate request for ctr(aes)"));
409 		crypto_free_skcipher(tfm);
410 		return -EAGAIN;
411 	}
412 
413 	ret = crypto_skcipher_setkey(tfm, key, key_len);
414 	if (ret) {
415 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
416 			  FL("Set key failed for ctr(aes), ret:%d"), -ret);
417 		skcipher_request_free(req);
418 		crypto_free_skcipher(tfm);
419 		return ret;
420 	}
421 
422 	sg_init_one(&sg_in, src, src_len);
423 	sg_init_one(&sg_out, dest, src_len);
424 	skcipher_request_set_crypt(req, &sg_in, &sg_out, src_len, siv);
425 
426 	if (enc)
427 		ret = crypto_skcipher_encrypt(req);
428 	else
429 		ret = crypto_skcipher_decrypt(req);
430 
431 	if (ret) {
432 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
433 			  FL("%s failed for ctr(aes), ret:%d"),
434 			  enc ? "Encryption" : "Decryption", -ret);
435 	}
436 
437 	skcipher_request_free(req);
438 	crypto_free_skcipher(tfm);
439 	return ret;
440 }
441 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
442 int qdf_aes_ctr(const uint8_t *key, unsigned int key_len, uint8_t *siv,
443 		const uint8_t *src, size_t src_len, uint8_t *dest, bool enc)
444 {
445 	struct crypto_ablkcipher *tfm;
446 	struct ablkcipher_request *req = NULL;
447 	struct scatterlist sg_in, sg_out;
448 	int ret;
449 
450 	if (!IS_VALID_CTR_KEY_LEN(key_len)) {
451 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
452 			  FL("Invalid key length: %u"), key_len);
453 		return -EINVAL;
454 	}
455 
456 	tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
457 	if (IS_ERR(tfm)) {
458 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
459 			  FL("Failed to alloc transformation for ctr(aes):%ld"),
460 			  PTR_ERR(tfm));
461 		return -EAGAIN;
462 	}
463 
464 	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
465 	if (!req) {
466 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
467 			  FL("Failed to allocate request for ctr(aes)"));
468 		crypto_free_ablkcipher(tfm);
469 		return -EAGAIN;
470 	}
471 
472 	ret = crypto_ablkcipher_setkey(tfm, key, key_len);
473 	if (ret) {
474 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
475 			  FL("Set key failed for ctr(aes), ret:%d"), -ret);
476 		ablkcipher_request_free(req);
477 		crypto_free_ablkcipher(tfm);
478 		return ret;
479 	}
480 
481 	sg_init_one(&sg_in, src, src_len);
482 	sg_init_one(&sg_out, dest, src_len);
483 	ablkcipher_request_set_crypt(req, &sg_in, &sg_out, src_len, siv);
484 
485 	if (enc)
486 		ret = crypto_ablkcipher_encrypt(req);
487 	else
488 		ret = crypto_ablkcipher_decrypt(req);
489 
490 	if (ret) {
491 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
492 			  FL("%s failed for ctr(aes), ret:%d"),
493 			  enc ? "Encryption" : "Decryption", -ret);
494 	}
495 
496 	ablkcipher_request_free(req);
497 	crypto_free_ablkcipher(tfm);
498 
499 	return ret;
500 }
501 #else
int qdf_aes_ctr(const uint8_t *key, unsigned int key_len, uint8_t *siv,
		const uint8_t *src, size_t src_len, uint8_t *dest, bool enc)
{
	/* AES-CTR needs the (a)blkcipher/skcipher API available only on
	 * kernel >= 3.18; report the operation as unsupported here.
	 */
	return -EINVAL;
}
507 #endif
508 
509 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
510 int qdf_crypto_aes_gmac(const uint8_t *key, uint16_t key_length,
511 			uint8_t *iv, const uint8_t *aad,
512 			const uint8_t *data, uint16_t data_len, uint8_t *mic)
513 {
514 	struct crypto_aead *tfm;
515 	int ret = 0;
516 	struct scatterlist sg[4];
517 	uint16_t req_size;
518 	struct aead_request *req = NULL;
519 	uint8_t *aad_ptr, *input;
520 
521 	tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
522 	if (IS_ERR(tfm)) {
523 		ret = PTR_ERR(tfm);
524 		tfm = NULL;
525 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
526 			  "%s: crypto_alloc_aead failed (%d)", __func__, ret);
527 		goto err_tfm;
528 	}
529 
530 	ret = crypto_aead_setkey(tfm, key, key_length);
531 	if (ret) {
532 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
533 			  "crypto_aead_setkey failed (%d)", ret);
534 		goto err_tfm;
535 	}
536 
537 	ret = crypto_aead_setauthsize(tfm, IEEE80211_MMIE_GMAC_MICLEN);
538 	if (ret) {
539 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
540 			  "crypto_aead_setauthsize failed (%d)", ret);
541 		goto err_tfm;
542 	}
543 
544 	/* Prepare aead request */
545 	req_size = sizeof(*req) + crypto_aead_reqsize(tfm) +
546 			IEEE80211_MMIE_GMAC_MICLEN + AAD_LEN;
547 	req = qdf_mem_malloc(req_size);
548 	if (!req) {
549 		ret = -ENOMEM;
550 		goto err_tfm;
551 	}
552 
553 	input = (uint8_t *)req + sizeof(*req) + crypto_aead_reqsize(tfm);
554 	aad_ptr = input + IEEE80211_MMIE_GMAC_MICLEN;
555 	qdf_mem_copy(aad_ptr, aad, AAD_LEN);
556 
557 	/* Scatter list operations */
558 	sg_init_table(sg, 4);
559 	sg_set_buf(&sg[0], aad_ptr, AAD_LEN);
560 	sg_set_buf(&sg[1], data, data_len);
561 	sg_set_buf(&sg[2], input, IEEE80211_MMIE_GMAC_MICLEN);
562 	sg_set_buf(&sg[3], mic, IEEE80211_MMIE_GMAC_MICLEN);
563 
564 	aead_request_set_tfm(req, tfm);
565 	aead_request_set_crypt(req, sg, sg, 0, iv);
566 	aead_request_set_ad(req,
567 			    AAD_LEN + data_len + IEEE80211_MMIE_GMAC_MICLEN);
568 	crypto_aead_encrypt(req);
569 
570 err_tfm:
571 	if (tfm)
572 		crypto_free_aead(tfm);
573 
574 	if (req)
575 		qdf_mem_free(req);
576 
577 	return ret;
578 }
579 #else
int qdf_crypto_aes_gmac(const uint8_t *key, uint16_t key_length,
			uint8_t *iv, const uint8_t *aad,
			const uint8_t *data, uint16_t data_len, uint8_t *mic)
{
	/* AES-GMAC needs the AEAD API from kernel >= 4.4. Parameters are
	 * const-qualified to match the 4.4+ implementation above —
	 * presumably the prototype in qdf_crypto.h is const as well
	 * (verify); the old non-const form declared an incompatible type.
	 */
	return -EINVAL;
}
586 #endif
587