1 /* 2 * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 /** 20 * DOC: qdf_crypto.c 21 * 22 * This source file contains linux specific definitions for QDF crypto APIs 23 */ 24 25 /* Include Files */ 26 #include "qdf_crypto.h" 27 #include <linux/export.h> 28 #include <crypto/hash.h> 29 #include <crypto/aes.h> 30 #include <crypto/skcipher.h> 31 #include <crypto/aead.h> 32 #include <linux/ieee80211.h> 33 #include <qdf_module.h> 34 35 /* Function Definitions and Documentation */ 36 #define MAX_HMAC_ELEMENT_CNT 10 37 38 /* 39 * xor: API to calculate xor 40 * @a: first variable 41 * @b: second variable 42 * @len: length of variables 43 */ 44 static void xor(uint8_t *a, const uint8_t *b, size_t len) 45 { 46 unsigned int i; 47 48 for (i = 0; i < len; i++) 49 a[i] ^= b[i]; 50 } 51 52 int qdf_get_hash(uint8_t *type, 53 uint8_t element_cnt, uint8_t *addr[], uint32_t *addr_len, 54 int8_t *hash) 55 { 56 return qdf_get_hmac_hash(type, NULL, 0, element_cnt, 57 addr, addr_len, hash); 58 } 59 60 int qdf_get_hmac_hash(uint8_t *type, uint8_t *key, 61 uint32_t keylen, 62 uint8_t element_cnt, uint8_t *addr[], uint32_t *addr_len, 63 int8_t *hash) 64 { 65 int i; 66 size_t src_len[MAX_HMAC_ELEMENT_CNT]; 67 68 if (element_cnt > MAX_HMAC_ELEMENT_CNT) { 69 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 70 FL("Invalid element count %d"), element_cnt); 71 return -EINVAL; 72 } 73 74 for (i = 0; i < element_cnt; i++) 75 src_len[i] = addr_len[i]; 76 77 return qdf_get_keyed_hash(type, key, keylen, (const uint8_t **)addr, 78 src_len, element_cnt, hash); 79 } 80 81 QDF_STATUS 82 qdf_default_hmac_sha256_kdf(uint8_t *secret, uint32_t secret_len, 83 uint8_t *label, uint8_t *optional_data, 84 uint32_t optional_data_len, uint8_t *key, 85 uint32_t keylen) 86 { 87 uint8_t tmp_hash[SHA256_DIGEST_SIZE] = {0}; 88 uint8_t count = 1; 89 uint8_t *addr[4]; 90 uint32_t len[4]; 91 uint32_t current_position = 0, remaining_data = SHA256_DIGEST_SIZE; 92 93 addr[0] = tmp_hash; 94 len[0] = SHA256_DIGEST_SIZE; 95 addr[1] = label; 96 len[1] = strlen(label) + 1; 97 addr[2] = optional_data; 98 len[2] = optional_data_len; 99 addr[3] = &count; 100 len[3] = 1; 101 102 if (keylen == 0 || 103 (keylen > (WLAN_MAX_PRF_INTERATIONS_COUNT * SHA256_DIGEST_SIZE))) { 104 qdf_err("invalid key length %d", keylen); 105 return QDF_STATUS_E_FAILURE; 106 } 107 108 /* Create T1 */ 109 if (qdf_get_hmac_hash(HMAC_SHA256_CRYPTO_TYPE, secret, secret_len, 3, 110 &addr[1], &len[1], tmp_hash) < 0) { 111 qdf_err("failed to get hmac hash"); 112 return QDF_STATUS_E_FAILURE; 113 } 114 115 /* Update hash from tmp_hash */ 116 qdf_mem_copy(key + current_position, tmp_hash, remaining_data); 117 current_position += remaining_data; 118 119 for (count = 2; current_position < keylen; count++) { 120 remaining_data = keylen - current_position; 121 if (remaining_data > SHA256_DIGEST_SIZE) 122 remaining_data = SHA256_DIGEST_SIZE; 123 124 /* Create T-n */ 125 if (qdf_get_hmac_hash(HMAC_SHA256_CRYPTO_TYPE, secret, 126 secret_len, 4, addr, len, tmp_hash) < 0) { 127 qdf_err("failed to get hmac hash"); 128 return QDF_STATUS_E_FAILURE; 129 } 130 /* Update hash from tmp_hash */ 131 qdf_mem_copy(key + current_position, tmp_hash, remaining_data); 132 current_position += remaining_data; 133 } 134 135 return QDF_STATUS_SUCCESS; 136 } 137 138 /* qdf_update_dbl from RFC 5297. Length of d is AES_BLOCK_SIZE (128 bits) */ 139 void qdf_update_dbl(uint8_t *d) 140 { 141 int i; 142 uint8_t msb, msb_prev = 0; 143 144 /* left shift by 1 */ 145 for (i = AES_BLOCK_SIZE - 1; i >= 0; i--) { 146 msb = d[i] & 0x80; 147 d[i] = d[i] << 1; 148 d[i] += msb_prev ? 1 : 0; 149 msb_prev = msb; 150 } 151 152 if (msb) 153 d[AES_BLOCK_SIZE - 1] ^= 0x87; 154 } 155 156 static inline void xor_128(const uint8_t *a, const uint8_t *b, uint8_t *out) 157 { 158 uint8_t i; 159 160 for (i = 0; i < AES_BLOCK_SIZE; i++) 161 out[i] = a[i] ^ b[i]; 162 } 163 164 static inline void leftshift_onebit(const uint8_t *input, uint8_t *output) 165 { 166 int i, overflow = 0; 167 168 for (i = (AES_BLOCK_SIZE - 1); i >= 0; i--) { 169 output[i] = input[i] << 1; 170 output[i] |= overflow; 171 overflow = (input[i] & 0x80) ? 1 : 0; 172 } 173 } 174 175 static void generate_subkey(struct crypto_cipher *tfm, uint8_t *k1, uint8_t *k2) 176 { 177 uint8_t l[AES_BLOCK_SIZE], tmp[AES_BLOCK_SIZE]; 178 const uint8_t const_rb[AES_BLOCK_SIZE] = { 179 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 180 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x87 181 }; 182 const uint8_t const_zero[AES_BLOCK_SIZE] = { 183 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 184 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 185 }; 186 187 crypto_cipher_encrypt_one(tfm, l, const_zero); 188 189 if ((l[0] & 0x80) == 0) { /* If MSB(l) = 0, then k1 = l << 1 */ 190 leftshift_onebit(l, k1); 191 } else { /* Else k1 = ( l << 1 ) (+) Rb */ 192 leftshift_onebit(l, tmp); 193 xor_128(tmp, const_rb, k1); 194 } 195 196 if ((k1[0] & 0x80) == 0) { 197 leftshift_onebit(k1, k2); 198 } else { 199 leftshift_onebit(k1, tmp); 200 xor_128(tmp, const_rb, k2); 201 } 202 } 203 204 static inline void padding(const uint8_t *lastb, uint8_t *pad, uint16_t length) 205 { 206 uint8_t j; 207 208 /* original last block */ 209 for (j = 0; j < AES_BLOCK_SIZE; j++) { 210 if (j < length) 211 pad[j] = lastb[j]; 212 else if (j == length) 213 pad[j] = 0x80; 214 else 215 pad[j] = 0x00; 216 } 217 } 218 219 int qdf_crypto_aes_128_cmac(const uint8_t *key, const uint8_t *data, 220 uint16_t len, uint8_t *mic) 221 { 222 uint8_t x[AES_BLOCK_SIZE], y[AES_BLOCK_SIZE]; 223 uint8_t m_last[AES_BLOCK_SIZE], padded[AES_BLOCK_SIZE]; 224 uint8_t k1[AES_KEYSIZE_128], k2[AES_KEYSIZE_128]; 225 int cmp_blk; 226 int i, num_block = (len + 15) / AES_BLOCK_SIZE; 227 struct crypto_cipher *tfm; 228 int ret; 229 230 /* 231 * Calculate MIC and then copy 232 */ 233 tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC); 234 if (IS_ERR(tfm)) { 235 ret = PTR_ERR(tfm); 236 qdf_err("crypto_alloc_cipher failed (%d)", ret); 237 return ret; 238 } 239 240 ret = crypto_cipher_setkey(tfm, key, AES_KEYSIZE_128); 241 if (ret) { 242 qdf_err("crypto_cipher_setkey failed (%d)", ret); 243 crypto_free_cipher(tfm); 244 return ret; 245 } 246 247 generate_subkey(tfm, k1, k2); 248 249 if (num_block == 0) { 250 num_block = 1; 251 cmp_blk = 0; 252 } else { 253 cmp_blk = ((len % AES_BLOCK_SIZE) == 0) ? 1 : 0; 254 } 255 256 if (cmp_blk) { 257 /* Last block is complete block */ 258 xor_128(&data[AES_BLOCK_SIZE * (num_block - 1)], k1, m_last); 259 } else { 260 /* Last block is not complete block */ 261 padding(&data[AES_BLOCK_SIZE * (num_block - 1)], padded, 262 len % AES_BLOCK_SIZE); 263 xor_128(padded, k2, m_last); 264 } 265 266 for (i = 0; i < AES_BLOCK_SIZE; i++) 267 x[i] = 0; 268 269 for (i = 0; i < (num_block - 1); i++) { 270 /* y = Mi (+) x */ 271 xor_128(x, &data[AES_BLOCK_SIZE * i], y); 272 /* x = AES-128(KEY, y) */ 273 crypto_cipher_encrypt_one(tfm, x, y); 274 } 275 276 xor_128(x, m_last, y); 277 crypto_cipher_encrypt_one(tfm, x, y); 278 279 crypto_free_cipher(tfm); 280 281 memcpy(mic, x, CMAC_TLEN); 282 283 return 0; 284 } 285 286 /** 287 * set_desc_flags() - set flags variable in the shash_desc struct 288 * @desc: pointer to shash_desc struct 289 * @tfm: pointer to crypto_shash struct 290 * 291 * Set the flags variable in the shash_desc struct by getting the flag 292 * from the crypto_hash struct. The flag is not actually used, prompting 293 * its removal from kernel code in versions 5.2 and above. Thus, for 294 * versions 5.2 and above, do not set the flag variable of shash_desc. 295 */ 296 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)) 297 static void set_desc_flags(struct shash_desc *desc, struct crypto_shash *tfm) 298 { 299 desc->flags = crypto_shash_get_flags(tfm); 300 } 301 #else 302 static void set_desc_flags(struct shash_desc *desc, struct crypto_shash *tfm) 303 { 304 } 305 #endif 306 307 int qdf_get_keyed_hash(const char *alg, const uint8_t *key, 308 unsigned int key_len, const uint8_t *src[], 309 size_t *src_len, size_t num_elements, uint8_t *out) 310 { 311 struct crypto_shash *tfm; 312 int ret; 313 size_t i; 314 315 tfm = crypto_alloc_shash(alg, 0, CRYPTO_ALG_ASYNC); 316 if (IS_ERR(tfm)) { 317 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 318 FL("Failed to allocate transformation for %s: %ld"), 319 alg, PTR_ERR(tfm)); 320 return -EINVAL; 321 } 322 323 if (key && key_len) { 324 ret = crypto_shash_setkey(tfm, key, key_len); 325 if (ret) { 326 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 327 FL("Set key failed for %s, ret:%d"), 328 alg, -ret); 329 goto error; 330 } 331 } 332 333 do { 334 SHASH_DESC_ON_STACK(desc, tfm); 335 desc->tfm = tfm; 336 set_desc_flags(desc, tfm); 337 338 ret = crypto_shash_init(desc); 339 if (ret) { 340 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 341 FL("Failed to init hash for %s, ret:%d"), 342 alg, -ret); 343 goto error; 344 } 345 346 for (i = 0; i < num_elements; i++) { 347 ret = crypto_shash_update(desc, src[i], src_len[i]); 348 if (ret) { 349 QDF_TRACE(QDF_MODULE_ID_QDF, 350 QDF_TRACE_LEVEL_ERROR, 351 FL("Failed to update hash for %s, ret:%d"), 352 alg, -ret); 353 goto error; 354 } 355 } 356 357 ret = crypto_shash_final(desc, out); 358 if (ret) 359 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 360 FL("Failed to get digest for %s, ret:%d"), 361 alg, -ret); 362 } while (0); 363 364 error: 365 crypto_free_shash(tfm); 366 return ret; 367 } 368 369 qdf_export_symbol(qdf_get_keyed_hash); 370 371 /* AES String to Vector from RFC 5297, 'out' should be of length AES_BLOCK_SIZE 372 */ 373 int qdf_aes_s2v(const uint8_t *key, unsigned int key_len, const uint8_t *s[], 374 size_t s_len[], size_t num_s, uint8_t *out) 375 { 376 const char *alg = "cmac(aes)"; 377 uint8_t d[AES_BLOCK_SIZE]; 378 uint8_t buf[AES_BLOCK_SIZE] = { 0 }; 379 size_t buf_len = AES_BLOCK_SIZE; 380 const uint8_t *a[1]; 381 unsigned int i; 382 uint8_t *t = NULL; 383 size_t t_len; 384 int ret; 385 386 if (num_s == 0) { 387 /* V = AES-CMAC(K, <one>) */ 388 buf[0] = 0x01; 389 a[0] = buf; 390 ret = qdf_get_keyed_hash(alg, key, key_len, a, &buf_len, 1, 391 out); 392 return ret; 393 } 394 395 /* D = AES-CMAC(K, <zero>) */ 396 a[0] = buf; 397 ret = qdf_get_keyed_hash(alg, key, key_len, a, &buf_len, 1, d); 398 if (ret) 399 goto error; 400 401 for (i = 0; i < num_s - 1; i++) { 402 /* D = qdf_update_dbl(D) xor AES-CMAC(K, Si) */ 403 qdf_update_dbl(d); 404 ret = qdf_get_keyed_hash(alg, key, key_len, &s[i], &s_len[i], 1, 405 buf); 406 if (ret) 407 goto error; 408 xor(d, buf, AES_BLOCK_SIZE); 409 } 410 411 if (s_len[i] >= AES_BLOCK_SIZE) { 412 /* len(Sn) >= 128 */ 413 /* T = Sn xorend D */ 414 t = qdf_mem_malloc(s_len[i]); 415 if (!t) 416 return -EINVAL; 417 qdf_mem_copy(t, s[i], s_len[i]); 418 xor(t + s_len[i] - AES_BLOCK_SIZE, d, AES_BLOCK_SIZE); 419 t_len = s_len[i]; 420 } else { 421 /* len(Sn) < 128 */ 422 /* T = qdf_update_dbl(D) xor pad(Sn) */ 423 qdf_update_dbl(d); 424 qdf_mem_zero(buf, AES_BLOCK_SIZE); 425 qdf_mem_copy(buf, s[i], s_len[i]); 426 buf[s_len[i]] = 0x80; 427 xor(d, s[i], AES_BLOCK_SIZE); 428 t = d; 429 t_len = AES_BLOCK_SIZE; 430 } 431 432 /* V = AES-CMAC(K, T) */ 433 a[0] = t; 434 ret = qdf_get_keyed_hash(alg, key, key_len, a, &t_len, 1, out); 435 436 error: 437 if (t && t != d) 438 qdf_mem_free(t); 439 return ret; 440 } 441 442 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)) 443 int qdf_aes_ctr(const uint8_t *key, unsigned int key_len, uint8_t *siv, 444 const uint8_t *src, size_t src_len, uint8_t *dest, bool enc) 445 { 446 struct crypto_skcipher *tfm; 447 struct skcipher_request *req = NULL; 448 struct scatterlist sg_in, sg_out; 449 int ret; 450 451 if (!IS_VALID_CTR_KEY_LEN(key_len)) { 452 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 453 FL("Invalid key length: %u"), key_len); 454 return -EINVAL; 455 } 456 457 tfm = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC); 458 if (IS_ERR(tfm)) { 459 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 460 FL("Failed to alloc transformation for ctr(aes):%ld"), 461 PTR_ERR(tfm)); 462 return -EAGAIN; 463 } 464 465 req = skcipher_request_alloc(tfm, GFP_KERNEL); 466 if (!req) { 467 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 468 FL("Failed to allocate request for ctr(aes)")); 469 crypto_free_skcipher(tfm); 470 return -EAGAIN; 471 } 472 473 ret = crypto_skcipher_setkey(tfm, key, key_len); 474 if (ret) { 475 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 476 FL("Set key failed for ctr(aes), ret:%d"), -ret); 477 skcipher_request_free(req); 478 crypto_free_skcipher(tfm); 479 return ret; 480 } 481 482 sg_init_one(&sg_in, src, src_len); 483 sg_init_one(&sg_out, dest, src_len); 484 skcipher_request_set_crypt(req, &sg_in, &sg_out, src_len, siv); 485 486 if (enc) 487 ret = crypto_skcipher_encrypt(req); 488 else 489 ret = crypto_skcipher_decrypt(req); 490 491 if (ret) { 492 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 493 FL("%s failed for ctr(aes), ret:%d"), 494 enc ? "Encryption" : "Decryption", -ret); 495 } 496 497 skcipher_request_free(req); 498 crypto_free_skcipher(tfm); 499 return ret; 500 } 501 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) 502 int qdf_aes_ctr(const uint8_t *key, unsigned int key_len, uint8_t *siv, 503 const uint8_t *src, size_t src_len, uint8_t *dest, bool enc) 504 { 505 struct crypto_ablkcipher *tfm; 506 struct ablkcipher_request *req = NULL; 507 struct scatterlist sg_in, sg_out; 508 int ret; 509 510 if (!IS_VALID_CTR_KEY_LEN(key_len)) { 511 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 512 FL("Invalid key length: %u"), key_len); 513 return -EINVAL; 514 } 515 516 tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC); 517 if (IS_ERR(tfm)) { 518 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 519 FL("Failed to alloc transformation for ctr(aes):%ld"), 520 PTR_ERR(tfm)); 521 return -EAGAIN; 522 } 523 524 req = ablkcipher_request_alloc(tfm, GFP_KERNEL); 525 if (!req) { 526 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 527 FL("Failed to allocate request for ctr(aes)")); 528 crypto_free_ablkcipher(tfm); 529 return -EAGAIN; 530 } 531 532 ret = crypto_ablkcipher_setkey(tfm, key, key_len); 533 if (ret) { 534 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 535 FL("Set key failed for ctr(aes), ret:%d"), -ret); 536 ablkcipher_request_free(req); 537 crypto_free_ablkcipher(tfm); 538 return ret; 539 } 540 541 sg_init_one(&sg_in, src, src_len); 542 sg_init_one(&sg_out, dest, src_len); 543 ablkcipher_request_set_crypt(req, &sg_in, &sg_out, src_len, siv); 544 545 if (enc) 546 ret = crypto_ablkcipher_encrypt(req); 547 else 548 ret = crypto_ablkcipher_decrypt(req); 549 550 if (ret) { 551 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 552 FL("%s failed for ctr(aes), ret:%d"), 553 enc ? "Encryption" : "Decryption", -ret); 554 } 555 556 ablkcipher_request_free(req); 557 crypto_free_ablkcipher(tfm); 558 559 return ret; 560 } 561 #else 562 int qdf_aes_ctr(const uint8_t *key, unsigned int key_len, uint8_t *siv, 563 const uint8_t *src, size_t src_len, uint8_t *dest, bool enc) 564 { 565 return -EINVAL; 566 } 567 #endif 568 569 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) 570 int qdf_crypto_aes_gmac(const uint8_t *key, uint16_t key_length, 571 uint8_t *iv, const uint8_t *aad, 572 const uint8_t *data, uint16_t data_len, uint8_t *mic) 573 { 574 struct crypto_aead *tfm; 575 int ret = 0; 576 struct scatterlist sg[4]; 577 uint16_t req_size; 578 struct aead_request *req = NULL; 579 uint8_t *aad_ptr, *input; 580 581 tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC); 582 if (IS_ERR(tfm)) { 583 ret = PTR_ERR(tfm); 584 tfm = NULL; 585 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 586 "%s: crypto_alloc_aead failed (%d)", __func__, ret); 587 goto err_tfm; 588 } 589 590 ret = crypto_aead_setkey(tfm, key, key_length); 591 if (ret) { 592 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 593 "crypto_aead_setkey failed (%d)", ret); 594 goto err_tfm; 595 } 596 597 ret = crypto_aead_setauthsize(tfm, IEEE80211_MMIE_GMAC_MICLEN); 598 if (ret) { 599 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 600 "crypto_aead_setauthsize failed (%d)", ret); 601 goto err_tfm; 602 } 603 604 /* Prepare aead request */ 605 req_size = sizeof(*req) + crypto_aead_reqsize(tfm) + 606 IEEE80211_MMIE_GMAC_MICLEN + AAD_LEN; 607 req = qdf_mem_malloc(req_size); 608 if (!req) { 609 ret = -ENOMEM; 610 goto err_tfm; 611 } 612 613 input = (uint8_t *)req + sizeof(*req) + crypto_aead_reqsize(tfm); 614 aad_ptr = input + IEEE80211_MMIE_GMAC_MICLEN; 615 qdf_mem_copy(aad_ptr, aad, AAD_LEN); 616 617 /* Scatter list operations */ 618 sg_init_table(sg, 4); 619 sg_set_buf(&sg[0], aad_ptr, AAD_LEN); 620 sg_set_buf(&sg[1], data, data_len); 621 sg_set_buf(&sg[2], input, IEEE80211_MMIE_GMAC_MICLEN); 622 sg_set_buf(&sg[3], mic, IEEE80211_MMIE_GMAC_MICLEN); 623 624 aead_request_set_tfm(req, tfm); 625 aead_request_set_crypt(req, sg, sg, 0, iv); 626 aead_request_set_ad(req, 627 AAD_LEN + data_len + IEEE80211_MMIE_GMAC_MICLEN); 628 crypto_aead_encrypt(req); 629 630 err_tfm: 631 if (tfm) 632 crypto_free_aead(tfm); 633 634 if (req) 635 qdf_mem_free(req); 636 637 return ret; 638 } 639 #else 640 int qdf_crypto_aes_gmac(uint8_t *key, uint16_t key_length, 641 uint8_t *iv, uint8_t *aad, uint8_t *data, 642 uint16_t data_len, uint8_t *mic) 643 { 644 return -EINVAL; 645 } 646 #endif 647