1 /* 2 * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #include <linux/mmc/card.h> 20 #include <linux/mmc/mmc.h> 21 #include <linux/mmc/host.h> 22 #include <linux/mmc/sdio_func.h> 23 #include <linux/mmc/sdio_ids.h> 24 #include <linux/mmc/sdio.h> 25 #include <linux/mmc/sd.h> 26 #include <linux/kthread.h> 27 #include <linux/version.h> 28 #include <linux/module.h> 29 #include <qdf_atomic.h> 30 #include <cds_utils.h> 31 #include <qdf_timer.h> 32 #include <cds_api.h> 33 #include <qdf_time.h> 34 #include "hif_sdio_dev.h" 35 #include "if_sdio.h" 36 #include "regtable_sdio.h" 37 #include "wma_api.h" 38 #include "hif_internal.h" 39 40 /* by default setup a bounce buffer for the data packets, 41 * if the underlying host controller driver 42 * does not use DMA you may be able to skip this step 43 * and save the memory allocation and transfer time 44 */ 45 #define HIF_USE_DMA_BOUNCE_BUFFER 1 46 #define ATH_MODULE_NAME hif 47 #include "a_debug.h" 48 49 #if HIF_USE_DMA_BOUNCE_BUFFER 50 /* macro to check if DMA buffer is WORD-aligned and DMA-able. 51 * Most host controllers assume the 52 * buffer is DMA'able and will bug-check otherwise (i.e. buffers on the stack). 
53 * virt_addr_valid check fails on stack memory. 54 */ 55 #define BUFFER_NEEDS_BOUNCE(buffer) (((unsigned long)(buffer) & 0x3) || \ 56 !virt_addr_valid((buffer))) 57 #else 58 #define BUFFER_NEEDS_BOUNCE(buffer) (false) 59 #endif 60 #define MAX_HIF_DEVICES 2 61 #ifdef HIF_MBOX_SLEEP_WAR 62 #define HIF_MIN_SLEEP_INACTIVITY_TIME_MS 50 63 #define HIF_SLEEP_DISABLE_UPDATE_DELAY 1 64 #define HIF_IS_WRITE_REQUEST_MBOX1_TO_3(request) \ 65 ((request->request & HIF_SDIO_WRITE) && \ 66 (request->address >= 0x1000 && \ 67 request->address < 0x1FFFF)) 68 #endif 69 70 unsigned int mmcbuswidth; 71 /* PERM:S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH */ 72 module_param(mmcbuswidth, uint, 0644); 73 MODULE_PARM_DESC(mmcbuswidth, 74 "Set MMC driver Bus Width: 1-1Bit, 4-4Bit, 8-8Bit"); 75 76 unsigned int mmcclock; 77 module_param(mmcclock, uint, 0644); 78 MODULE_PARM_DESC(mmcclock, "Set MMC driver Clock value"); 79 80 unsigned int brokenirq; 81 module_param(brokenirq, uint, 0644); 82 MODULE_PARM_DESC(brokenirq, 83 "Set as 1 to use polling method instead of interrupt mode"); 84 85 unsigned int forcesleepmode; 86 module_param(forcesleepmode, uint, 0644); 87 MODULE_PARM_DESC(forcesleepmode, 88 "Set sleep mode: 0-host capbility, 1-force WOW, 2-force DeepSleep, 3-force CutPower"); 89 90 #ifdef CONFIG_X86 91 unsigned int asyncintdelay = 2; 92 module_param(asyncintdelay, uint, 0644); 93 MODULE_PARM_DESC(asyncintdelay, 94 "Delay clock count for async interrupt, 2 is default, valid values are 1 and 2"); 95 #else 96 unsigned int asyncintdelay; 97 module_param(asyncintdelay, uint, 0644); 98 MODULE_PARM_DESC(asyncintdelay, 99 "Delay clock count for async interrupt, 0 is default, valid values are 1 and 2"); 100 #endif 101 102 unsigned int forcecard; 103 module_param(forcecard, uint, 0644); 104 MODULE_PARM_DESC(forcecard, 105 "Ignore card capabilities information to switch bus mode"); 106 107 unsigned int debugcccr = 1; 108 module_param(debugcccr, uint, 0644); 109 MODULE_PARM_DESC(debugcccr, "Output this 
cccr values"); 110 111 unsigned int writecccr1; 112 module_param(writecccr1, uint, 0644); 113 unsigned int writecccr1value; 114 module_param(writecccr1value, uint, 0644); 115 116 unsigned int writecccr2; 117 module_param(writecccr2, uint, 0644); 118 unsigned int writecccr2value; 119 module_param(writecccr2value, uint, 0644); 120 121 unsigned int writecccr3; 122 module_param(writecccr3, uint, 0644); 123 unsigned int writecccr3value; 124 module_param(writecccr3value, uint, 0644); 125 126 unsigned int writecccr4; 127 module_param(writecccr4, uint, 0644); 128 129 unsigned int writecccr4value; 130 module_param(writecccr4value, uint, 0644); 131 132 unsigned int modstrength; 133 module_param(modstrength, uint, 0644); 134 MODULE_PARM_DESC(modstrength, "Adjust internal driver strength"); 135 136 #define dev_to_sdio_func(d) container_of(d, struct sdio_func, dev) 137 #define to_sdio_driver(d) container_of(d, struct sdio_driver, drv) 138 static struct hif_sdio_dev *add_hif_device(struct sdio_func *func); 139 static struct hif_sdio_dev *get_hif_device(struct sdio_func *func); 140 static void del_hif_device(struct hif_sdio_dev *device); 141 static int func0_cmd52_write_byte(struct mmc_card *card, unsigned int address, 142 unsigned char byte); 143 static int func0_cmd52_read_byte(struct mmc_card *card, unsigned int address, 144 unsigned char *byte); 145 146 int reset_sdio_on_unload; 147 module_param(reset_sdio_on_unload, int, 0644); 148 149 uint32_t nohifscattersupport = 1; 150 151 uint32_t forcedriverstrength = 1; /* force driver strength to type D */ 152 153 /* ------ Static Variables ------ */ 154 static const struct sdio_device_id ar6k_id_table[] = { 155 #ifdef AR6002_HEADERS_DEF 156 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6002_BASE | 0x0))}, 157 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6002_BASE | 0x1))}, 158 #endif 159 #ifdef AR6003_HEADERS_DEF 160 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x0))}, 161 {SDIO_DEVICE(MANUFACTURER_CODE, 
(MANUFACTURER_ID_AR6003_BASE | 0x1))}, 162 #endif 163 #ifdef AR6004_HEADERS_DEF 164 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x0))}, 165 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x1))}, 166 #endif 167 #ifdef AR6320_HEADERS_DEF 168 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x0))}, 169 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x1))}, 170 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x2))}, 171 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x3))}, 172 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x4))}, 173 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x5))}, 174 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x6))}, 175 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x7))}, 176 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x8))}, 177 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x9))}, 178 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0xA))}, 179 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0xB))}, 180 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0xC))}, 181 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0xD))}, 182 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0xE))}, 183 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0xF))}, 184 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x0))}, 185 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x1))}, 186 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x2))}, 187 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x3))}, 188 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x4))}, 189 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x5))}, 190 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 
0x6))}, 191 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x7))}, 192 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x8))}, 193 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x9))}, 194 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0xA))}, 195 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0xB))}, 196 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0xC))}, 197 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0xD))}, 198 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0xE))}, 199 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0xF))}, 200 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x0))}, 201 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x1))}, 202 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x2))}, 203 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x3))}, 204 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x4))}, 205 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x5))}, 206 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x6))}, 207 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x7))}, 208 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x8))}, 209 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x9))}, 210 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0xA))}, 211 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0xB))}, 212 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0xC))}, 213 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0xD))}, 214 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0xE))}, 215 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0xF))}, 216 {SDIO_DEVICE(MANUFACTURER_CODE, (0 | 0x0))}, 217 {SDIO_DEVICE(MANUFACTURER_CODE, (0 | 
							0x1))},
#endif
	{ /* null */ },
};

/* make sure we only unregister when registered. */
static int registered;

/* OS-driver callbacks captured by hif_init(); zeroed again on removal */
struct osdrv_callbacks osdrv_callbacks;
uint32_t onebitmode;
uint32_t busspeedlow;
uint32_t debughif;

/* all probed SDIO HIF instances, indexed at probe time */
static struct hif_sdio_dev *hif_devices[MAX_HIF_DEVICES];

static void reset_all_cards(void);
static QDF_STATUS hif_disable_func(struct hif_sdio_dev *device,
				   struct sdio_func *func);
static QDF_STATUS hif_enable_func(struct hif_sdio_dev *device,
				  struct sdio_func *func);

#if defined(WLAN_DEBUG) || defined(DEBUG)
ATH_DEBUG_INSTANTIATE_MODULE_VAR(hif,
				 "hif",
				 "(Linux MMC) Host Interconnect Framework",
				 ATH_DEBUG_MASK_DEFAULTS, 0, NULL);
#endif

/**
 * hif_sdio_init_callbacks() - record the OS driver callback handlers
 * @callbacks: callback set to copy into the module-global osdrv_callbacks
 *
 * Registration with the bus driver core itself is done from HDD; this
 * only stores the callbacks and marks the module as registered.
 *
 * Return: 0 (always succeeds).
 */
static int hif_sdio_init_callbacks(struct osdrv_callbacks *callbacks)
{
	int status = 0;
	/* store the callback handlers */
	osdrv_callbacks = *callbacks;

	/* Register with bus driver core is done from HDD */
	AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("%s: HIFInit registering\n",
			__func__));
	registered = 1;

	return status;
}

/* drop the callbacks recorded by hif_sdio_init_callbacks() */
static void hif_sdio_remove_callbacks(void)
{
	qdf_mem_zero(&osdrv_callbacks, sizeof(osdrv_callbacks));
}


/**
 * hif_init() - Initializes the driver callbacks
 * @callbacks: pointer to driver callback structure
 *
 * Return: 0 on success, error number otherwise.
269 */ 270 QDF_STATUS hif_init(struct osdrv_callbacks *callbacks) 271 { 272 int status; 273 274 AR_DEBUG_ASSERT(callbacks != NULL); 275 A_REGISTER_MODULE_DEBUG_INFO(hif); 276 277 HIF_ENTER(); 278 279 status = hif_sdio_init_callbacks(callbacks); 280 AR_DEBUG_ASSERT(status == 0); 281 282 if (status != 0) { 283 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 284 ("%s sdio_register_driver failed!", __func__)); 285 return QDF_STATUS_E_FAILURE; 286 } 287 AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, 288 ("%s sdio_register_driver successful", __func__)); 289 290 return QDF_STATUS_SUCCESS; 291 292 } 293 294 /** 295 * __hif_read_write() - sdio read/write wrapper 296 * @device: pointer to hif device structure 297 * @address: address to read 298 * @buffer: buffer to hold read/write data 299 * @length: length to read/write 300 * @request: read/write/sync/async request 301 * @context: pointer to hold calling context 302 * 303 * Return: 0 on success, error number otherwise. 304 */ 305 static QDF_STATUS 306 __hif_read_write(struct hif_sdio_dev *device, 307 uint32_t address, 308 char *buffer, 309 uint32_t length, uint32_t request, void *context) 310 { 311 uint8_t opcode; 312 QDF_STATUS status = QDF_STATUS_SUCCESS; 313 int ret = A_OK; 314 uint8_t *tbuffer; 315 bool bounced = false; 316 317 AR_DEBUG_ASSERT(device != NULL); 318 AR_DEBUG_ASSERT(device->func != NULL); 319 AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, 320 ("__hif_read_write, addr:0X%06X, len:%08d, %s, %s\n", 321 address, length, 322 request & HIF_SDIO_READ ? "Read " : "Write", 323 request & HIF_ASYNCHRONOUS ? 
"Async" : "Sync ")); 324 325 do { 326 if (request & HIF_EXTENDED_IO) { 327 AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, 328 ("%s: Command type: CMD53\n", __func__)); 329 } else { 330 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 331 ("%s: Invalid command type: 0x%08x\n", 332 __func__, request)); 333 status = QDF_STATUS_E_INVAL; 334 break; 335 } 336 337 if (request & HIF_BLOCK_BASIS) { 338 /* round to whole block length size */ 339 length = 340 (length / HIF_MBOX_BLOCK_SIZE) * 341 HIF_MBOX_BLOCK_SIZE; 342 AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, 343 ("%s: Block mode (BlockLen: %d)\n", 344 __func__, length)); 345 } else if (request & HIF_BYTE_BASIS) { 346 AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, 347 ("%s: Byte mode (BlockLen: %d)\n", 348 __func__, length)); 349 } else { 350 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 351 ("%s: Invalid data mode: 0x%08x\n", 352 __func__, request)); 353 status = QDF_STATUS_E_INVAL; 354 break; 355 } 356 if (request & HIF_SDIO_WRITE) { 357 struct hif_device_mbox_info MailBoxInfo; 358 unsigned int mboxLength = 0; 359 360 hif_configure_device(device, 361 HIF_DEVICE_GET_MBOX_ADDR, 362 &MailBoxInfo, sizeof(MailBoxInfo)); 363 if (address >= 0x800 && address < 0xC00) { 364 /* Host control register and CIS Window */ 365 mboxLength = 0; 366 } else if (address == MailBoxInfo.mbox_addresses[0] 367 || address == MailBoxInfo.mbox_addresses[1] 368 || address == MailBoxInfo.mbox_addresses[2] 369 || address == 370 MailBoxInfo.mbox_addresses[3]) { 371 mboxLength = HIF_MBOX_WIDTH; 372 } else if (address == 373 MailBoxInfo.mbox_prop[0].extended_address) { 374 mboxLength = 375 MailBoxInfo.mbox_prop[0].extended_size; 376 } else if (address == 377 MailBoxInfo.mbox_prop[1].extended_address) { 378 mboxLength = 379 MailBoxInfo.mbox_prop[1].extended_size; 380 } else { 381 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 382 ("Invalid written address: 0x%08x\n", 383 address)); 384 break; 385 } 386 AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, 387 ("address:%08X, Length:0x%08X, Dummy:0x%04X, Final:0x%08X\n", 388 address, length, 389 (request & 
HIF_DUMMY_SPACE_MASK) >> 16, 390 mboxLength == 391 0 ? address : address + (mboxLength - 392 length))); 393 if (mboxLength != 0) { 394 if (length > mboxLength) { 395 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 396 ("%s: written length(0x%08X) larger than mbox len(0x%08x)\n", 397 __func__, length, mboxLength)); 398 break; 399 } 400 address += (mboxLength - length); 401 /* 402 * plus dummy byte count 403 */ 404 address += ((request & 405 HIF_DUMMY_SPACE_MASK) >> 16); 406 } 407 } 408 409 if (request & HIF_FIXED_ADDRESS) { 410 opcode = CMD53_FIXED_ADDRESS; 411 AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, 412 ("%s: Address mode: Fixed 0x%X\n", 413 __func__, address)); 414 } else if (request & HIF_INCREMENTAL_ADDRESS) { 415 opcode = CMD53_INCR_ADDRESS; 416 AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, 417 ("%s: Address mode: Incremental 0x%X\n", 418 __func__, address)); 419 } else { 420 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 421 ("%s: Invalid address mode: 0x%08x\n", 422 __func__, request)); 423 status = QDF_STATUS_E_INVAL; 424 break; 425 } 426 427 if (request & HIF_SDIO_WRITE) { 428 #if HIF_USE_DMA_BOUNCE_BUFFER 429 if (BUFFER_NEEDS_BOUNCE(buffer)) { 430 AR_DEBUG_ASSERT(device->dma_buffer != NULL); 431 tbuffer = device->dma_buffer; 432 /* copy the write data to the dma buffer */ 433 AR_DEBUG_ASSERT(length <= HIF_DMA_BUFFER_SIZE); 434 if (length > HIF_DMA_BUFFER_SIZE) { 435 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 436 ("%s: Invalid write length: %d\n", 437 __func__, length)); 438 status = QDF_STATUS_E_INVAL; 439 break; 440 } 441 memcpy(tbuffer, buffer, length); 442 bounced = true; 443 } else { 444 tbuffer = buffer; 445 } 446 #else 447 tbuffer = buffer; 448 #endif 449 if (opcode == CMD53_FIXED_ADDRESS && tbuffer != NULL) { 450 ret = 451 sdio_writesb(device->func, address, 452 tbuffer, 453 length); 454 AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, 455 ("%s: writesb ret=%d address: 0x%X, len: %d, 0x%X\n", 456 __func__, ret, address, length, 457 *(int *)tbuffer)); 458 } else if (tbuffer) { 459 ret = 460 sdio_memcpy_toio(device->func, 
address, 461 tbuffer, length); 462 AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, 463 ("%s: writeio ret=%d address: 0x%X, len: %d, 0x%X\n", 464 __func__, ret, address, length, 465 *(int *)tbuffer)); 466 } 467 } else if (request & HIF_SDIO_READ) { 468 #if HIF_USE_DMA_BOUNCE_BUFFER 469 if (BUFFER_NEEDS_BOUNCE(buffer)) { 470 AR_DEBUG_ASSERT(device->dma_buffer != NULL); 471 AR_DEBUG_ASSERT(length <= HIF_DMA_BUFFER_SIZE); 472 if (length > HIF_DMA_BUFFER_SIZE) { 473 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 474 ("%s: Invalid read length: %d\n", 475 __func__, length)); 476 status = QDF_STATUS_E_INVAL; 477 break; 478 } 479 tbuffer = device->dma_buffer; 480 bounced = true; 481 } else { 482 tbuffer = buffer; 483 } 484 #else 485 tbuffer = buffer; 486 #endif 487 if (opcode == CMD53_FIXED_ADDRESS && tbuffer != NULL) { 488 ret = 489 sdio_readsb(device->func, tbuffer, 490 address, 491 length); 492 AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, 493 ("%s: readsb ret=%d address: 0x%X, len: %d, 0x%X\n", 494 __func__, ret, address, length, 495 *(int *)tbuffer)); 496 } else if (tbuffer) { 497 ret = 498 sdio_memcpy_fromio(device->func, 499 tbuffer, 500 address, length); 501 AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, 502 ("%s: readio ret=%d address: 0x%X, len: %d, 0x%X\n", 503 __func__, ret, address, length, 504 *(int *)tbuffer)); 505 } 506 #if HIF_USE_DMA_BOUNCE_BUFFER 507 if (bounced && tbuffer) 508 memcpy(buffer, tbuffer, length); 509 #endif 510 } else { 511 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 512 ("%s: Invalid direction: 0x%08x\n", 513 __func__, request)); 514 status = QDF_STATUS_E_INVAL; 515 return status; 516 } 517 518 if (ret) { 519 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 520 ("%s: SDIO bus operation failed! MMC stack returned : %d\n", 521 __func__, ret)); 522 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 523 ("__hif_read_write, addr:0X%06X, len:%08d, %s, %s\n", 524 address, length, 525 request & HIF_SDIO_READ ? "Read " : "Write", 526 request & HIF_ASYNCHRONOUS ? 
"Async" : 527 "Sync ")); 528 status = QDF_STATUS_E_FAILURE; 529 } 530 } while (false); 531 532 return status; 533 } 534 535 /** 536 * add_to_async_list() - add bus reqest to async task list 537 * @device: pointer to hif device 538 * @busrequest: pointer to type of bus request 539 * 540 * Return: None. 541 */ 542 void add_to_async_list(struct hif_sdio_dev *device, 543 struct bus_request *busrequest) 544 { 545 struct bus_request *async; 546 struct bus_request *active; 547 548 qdf_spin_lock_irqsave(&device->asynclock); 549 active = device->asyncreq; 550 if (active == NULL) { 551 device->asyncreq = busrequest; 552 device->asyncreq->inusenext = NULL; 553 } else { 554 for (async = device->asyncreq; 555 async != NULL; async = async->inusenext) { 556 active = async; 557 } 558 active->inusenext = busrequest; 559 busrequest->inusenext = NULL; 560 } 561 qdf_spin_unlock_irqrestore(&device->asynclock); 562 } 563 564 /** 565 * hif_read_write() - queue a read/write request 566 * @device: pointer to hif device structure 567 * @address: address to read 568 * @buffer: buffer to hold read/write data 569 * @length: length to read/write 570 * @request: read/write/sync/async request 571 * @context: pointer to hold calling context 572 * 573 * Return: 0 on success, error number otherwise. 
 */
QDF_STATUS
hif_read_write(struct hif_sdio_dev *device,
	       uint32_t address,
	       char *buffer, uint32_t length,
	       uint32_t request, void *context)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct bus_request *busrequest;

	AR_DEBUG_ASSERT(device != NULL);
	AR_DEBUG_ASSERT(device->func != NULL);
	AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
			("%s: device 0x%pK addr 0x%X buffer 0x%pK len %d req 0x%X context 0x%pK",
			 __func__, device, address, buffer,
			 length, request, context));

	/*sdio r/w action is not needed when suspend, so just return */
	if ((device->is_suspend == true)
	    && (device->power_config == HIF_DEVICE_POWER_CUT)) {
		AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
				("skip io when suspending\n"));
		return QDF_STATUS_SUCCESS;
	}
	do {
		if ((request & HIF_ASYNCHRONOUS) ||
		    (request & HIF_SYNCHRONOUS)) {
			/* serialize all requests through the async thread */
			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
					("%s: Execution mode: %s\n", __func__,
					 (request & HIF_ASYNCHRONOUS) ? "Async"
					 : "Synch"));
			busrequest = hif_allocate_bus_request(device);
			if (busrequest == NULL) {
				AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
					("no async bus requests available (%s, addr:0x%X, len:%d)\n",
					 request & HIF_SDIO_READ ? "READ" :
					 "WRITE", address, length));
				return QDF_STATUS_E_FAILURE;
			}
			busrequest->address = address;
			busrequest->buffer = buffer;
			busrequest->length = length;
			busrequest->request = request;
			busrequest->context = context;

			/* queue first, then wake the async thread below */
			add_to_async_list(device, busrequest);

			if (request & HIF_SYNCHRONOUS) {
				AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
					("%s: queued sync req: 0x%lX\n",
					 __func__,
					 (unsigned long)busrequest));

				/* wait for completion */
				up(&device->sem_async);
				if (down_interruptible(&busrequest->sem_req) !=
				    0) {
					/* interrupted, exit */
					/* NOTE(review): busrequest is still
					 * on the async list and is not freed
					 * here; async_task will later signal
					 * sem_req with no waiter and the
					 * request is leaked - confirm this
					 * is acceptable/intended.
					 */
					return QDF_STATUS_E_FAILURE;
				} else {
					/* shadows the outer status on
					 * purpose: capture the result the
					 * async thread stored before the
					 * request is recycled
					 */
					QDF_STATUS status = busrequest->status;

					AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
						("%s: sync return freeing 0x%lX: 0x%X\n",
						 __func__,
						 (unsigned long)
						 busrequest,
						 busrequest->status));
					AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
						("%s: freeing req: 0x%X\n",
						 __func__,
						 (unsigned int)
						 request));
					hif_free_bus_request(device,
							     busrequest);
					return status;
				}
			} else {
				AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
					("%s: queued async req: 0x%lX\n",
					 __func__,
					 (unsigned long)busrequest));
				up(&device->sem_async);
				/* completion reported later through
				 * htc_callbacks.rwCompletionHandler
				 */
				return QDF_STATUS_E_PENDING;
			}
		} else {
			AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
					("%s: Invalid execution mode: 0x%08x\n",
					 __func__,
					 (unsigned int)request));
			status = QDF_STATUS_E_INVAL;
			break;
		}
	} while (0);

	return status;
}

/**
 * async_task() - thread function to serialize all bus requests
 * @param: pointer to hif device
 *
 * thread function to serialize all requests, both sync and async
 * Return: 0 on success, error number otherwise.
 */
static int async_task(void *param)
{
	struct hif_sdio_dev *device;
	struct bus_request *request;
	QDF_STATUS status;

	device = (struct hif_sdio_dev *) param;
	set_current_state(TASK_INTERRUPTIBLE);
	/* loop until hif_stop sets async_shutdown and kicks sem_async */
	while (!device->async_shutdown) {
		/* wait for work */
		if (down_interruptible(&device->sem_async) != 0) {
			/* interrupted, exit */
			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
					("%s: async task interrupted\n",
					 __func__));
			break;
		}
		if (device->async_shutdown) {
			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
					("%s: async task stopping\n",
					 __func__));
			break;
		}
		/* we want to hold the host over multiple cmds
		 * if possible, but holding the host blocks
		 * card interrupts
		 */
		sdio_claim_host(device->func);
		qdf_spin_lock_irqsave(&device->asynclock);
		/* pull the request to work on */
		while (device->asyncreq != NULL) {
			/* pop the list head; the lock is dropped while the
			 * request is executed and re-taken before the next
			 * iteration
			 */
			request = device->asyncreq;
			if (request->inusenext != NULL)
				device->asyncreq = request->inusenext;
			else
				device->asyncreq = NULL;
			qdf_spin_unlock_irqrestore(&device->asynclock);
			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
				("%s: async_task processing req: 0x%lX\n",
				 __func__, (unsigned long)request));

			if (request->scatter_req != NULL) {
				A_ASSERT(device->scatter_enabled);
				/* pass the request to scatter routine which
				 * executes it synchronously, note, no need
				 * to free the request since scatter requests
				 * are maintained on a separate list
				 */
				status = do_hif_read_write_scatter(device,
								   request);
			} else {
				/* call hif_read_write in sync mode */
				status =
					__hif_read_write(device,
							 request->address,
							 request->buffer,
							 request->length,
							 request->
							 request &
							 ~HIF_SYNCHRONOUS,
							 NULL);
				if (request->request & HIF_ASYNCHRONOUS) {
					void *context = request->context;

					AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
						("%s: freeing req: 0x%lX\n",
						 __func__, (unsigned long)
						 request));
					hif_free_bus_request(device, request);
					/* only the pointer VALUE is logged
					 * below; the request itself was
					 * recycled above
					 */
					AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
						("%s: async_task completion req 0x%lX\n",
						 __func__, (unsigned long)
						 request));
					device->htc_callbacks.
					rwCompletionHandler(context,
							    status);
				} else {
					/* sync caller is blocked on sem_req
					 * in hif_read_write and frees the
					 * request itself
					 */
					AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
						("%s: async_task upping req: 0x%lX\n",
						 __func__, (unsigned long)
						 request));
					request->status = status;
					up(&request->sem_req);
				}
			}
			qdf_spin_lock_irqsave(&device->asynclock);
		}
		qdf_spin_unlock_irqrestore(&device->asynclock);
		sdio_release_host(device->func);
	}

	/* signal hif_stop that the thread has fully exited */
	complete_and_exit(&device->async_completion, 0);

	return 0;
}

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0))
/**
 * sdio_card_highspeed() - check if high speed supported
 * @card: pointer to mmc card struct
 *
 * Return: non zero if card supports high speed.
 */
static inline int sdio_card_highspeed(struct mmc_card *card)
{
	return mmc_card_highspeed(card);
}
#else
/* mmc_card_highspeed() was replaced by mmc_card_hs() in 3.16 */
static inline int sdio_card_highspeed(struct mmc_card *card)
{
	return mmc_card_hs(card);
}
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0))
/**
 * sdio_card_set_highspeed() - set high speed
 * @card: pointer to mmc card struct
 *
 * Return: none.
 */
static inline void sdio_card_set_highspeed(struct mmc_card *card)
{
	mmc_card_set_highspeed(card);
}
#else
/* no-op: the highspeed card flag was removed from the core in 3.16 */
static inline void sdio_card_set_highspeed(struct mmc_card *card)
{
}
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0))
/**
 * sdio_card_state() - set card state
 * @card: pointer to mmc card struct
 *
 * Return: none.
 */
static inline void sdio_card_state(struct mmc_card *card)
{
	/* clear the highspeed flag after a failed CCCR speed read */
	card->state &= ~MMC_STATE_HIGHSPEED;
}
#else
/* no-op: MMC_STATE_HIGHSPEED was removed from the core in 3.16 */
static inline void sdio_card_state(struct mmc_card *card)
{
}
#endif

/**
 * reinit_sdio() - re-initialize sdio bus
 * @device: pointer to hif device
 *
 * Re-applies high-speed mode, bus clock and 4-bit bus width via raw
 * func0 CMD52 accesses and direct host->ops->set_ios() calls; used when
 * resuming from cut-power where the card state has been lost.
 *
 * Return: 0 on success, error number otherwise.
 */
static QDF_STATUS reinit_sdio(struct hif_sdio_dev *device)
{
	int32_t err = 0;
	struct mmc_host *host;
	struct mmc_card *card;
	struct sdio_func *func;
	uint8_t cmd52_resp;
	uint32_t clock;

	func = device->func;
	card = func->card;
	host = card->host;

	sdio_claim_host(func);

	do {
		/* Enable high speed */
		if (card->host->caps & MMC_CAP_SD_HIGHSPEED) {
			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
					("%s: Set high speed mode\n",
					 __func__));
			err = func0_cmd52_read_byte(card, SDIO_CCCR_SPEED,
						    &cmd52_resp);
			if (err) {
				AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
					("%s: CMD52 read to CCCR speed register failed : %d\n",
					 __func__, err));
				sdio_card_state(card);
				/* no need to break */
			} else {
				err = func0_cmd52_write_byte(card,
						SDIO_CCCR_SPEED,
						(cmd52_resp |
						 SDIO_SPEED_EHS));
				if (err) {
					AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
						("%s: CMD52 write to CCCR speed register failed : %d\n",
						 __func__, err));
					break;
				}
				sdio_card_set_highspeed(card);
				host->ios.timing = MMC_TIMING_SD_HS;
				host->ops->set_ios(host, &host->ios);
			}
		}

		/* Set clock */
		if (sdio_card_highspeed(card))
			clock = 50000000;
		else
			clock = card->cis.max_dtr;

		if (clock > host->f_max)
			clock = host->f_max;
		/*
		 * In fpga mode the clk should be set to 12500000,
		 * or will result in scan channel setting timeout error.
		 * So in fpga mode, please set module parameter mmcclock
		 * to 12500000.
		 */
		if (mmcclock > 0)
			clock = mmcclock;
		host->ios.clock = clock;
		host->ops->set_ios(host, &host->ios);


		if (card->host->caps & MMC_CAP_4_BIT_DATA) {
			/* CMD52: Set bus width & disable card detect
			 * resistor
			 */
			err = func0_cmd52_write_byte(card, SDIO_CCCR_IF,
					SDIO_BUS_CD_DISABLE |
					SDIO_BUS_WIDTH_4BIT);
			if (err) {
				AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
					("%s: CMD52 to set bus mode failed : %d\n",
					 __func__, err));
				break;
			}
			host->ios.bus_width = MMC_BUS_WIDTH_4;
			host->ops->set_ios(host, &host->ios);
		}
	} while (0);

	sdio_release_host(func);

	return (err) ? QDF_STATUS_E_FAILURE : QDF_STATUS_SUCCESS;
}

/*
 * Setup IRQ mode for deep sleep and WoW
 * Switch back to 1 bits mode when we suspend for
 * WoW in order to detect SDIO irq without clock.
 * Re-enable async 4-bit irq mode for some host controllers
 * after resume.
 */
static int sdio_enable4bits(struct hif_sdio_dev *device, int enable)
{
	int ret = 0;
	struct sdio_func *func = device->func;
	struct mmc_card *card = func->card;
	struct mmc_host *host = card->host;

	/* nothing to do on 1-bit-only hosts */
	if (!(host->caps & (MMC_CAP_4_BIT_DATA)))
		return 0;

	/* low-speed cards without wide-bus support stay in 1-bit mode */
	if (card->cccr.low_speed && !card->cccr.wide_bus)
		return 0;

	sdio_claim_host(func);
	do {
		int setAsyncIRQ = 0;
		__u16 manufacturer_id =
			device->id->device & MANUFACTURER_ID_AR6K_BASE_MASK;

		/* Re-enable 4-bit ASYNC interrupt on AR6003x
		 * after system resume for some host controller
		 */
		if (manufacturer_id == MANUFACTURER_ID_AR6003_BASE) {
			setAsyncIRQ = 1;
			ret =
			func0_cmd52_write_byte(func->card,
					CCCR_SDIO_IRQ_MODE_REG_AR6003,
					enable ?
					SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_AR6003
					: 0);
		} else if (manufacturer_id == MANUFACTURER_ID_AR6320_BASE ||
			   manufacturer_id == MANUFACTURER_ID_QCA9377_BASE ||
			   manufacturer_id == MANUFACTURER_ID_QCA9379_BASE) {
			unsigned char data = 0;

			setAsyncIRQ = 1;
			/* read-modify-write: only the async-IRQ bit of the
			 * vendor interrupt-mode register is touched
			 */
			ret =
			func0_cmd52_read_byte(func->card,
					CCCR_SDIO_IRQ_MODE_REG_AR6320,
					&data);
			if (ret) {
				AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
					("%s: failed to read interrupt extension register %d\n",
					 __func__, ret));
				sdio_release_host(func);
				return ret;
			}
			if (enable)
				data |= SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_AR6320;
			else
				data &= ~SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_AR6320;
			ret =
			func0_cmd52_write_byte(func->card,
					CCCR_SDIO_IRQ_MODE_REG_AR6320,
					data);
		}
		if (setAsyncIRQ) {
			if (ret) {
				AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
					("%s: failed to setup 4-bit ASYNC IRQ mode into %d err %d\n",
					 __func__, enable, ret));
			} else {
				AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
					("%s: Setup 4-bit ASYNC IRQ mode into %d successfully\n",
					 __func__, enable));
			}
		}
	} while (0);
	sdio_release_host(func);

	return ret;
}


/**
 * power_state_change_notify() - SDIO bus power notification handler
 * @config: hif device power change type
 *
 * Return: 0 on success, error number otherwise.
 */
static QDF_STATUS
power_state_change_notify(struct hif_sdio_dev *device,
			  enum HIF_DEVICE_POWER_CHANGE_TYPE config)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct sdio_func *func = device->func;
	int old_reset_val;

	AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
			("%s: config type %d\n",
			 __func__, config));
	switch (config) {
	case HIF_DEVICE_POWER_DOWN:
		/* Disable 4bits to allow SDIO bus to detect
		 * DAT1 as interrupt source
		 */
		sdio_enable4bits(device, 0);
		break;
	case HIF_DEVICE_POWER_CUT:
		/* force a card reset during the disable regardless of the
		 * module parameter, then restore it
		 */
		old_reset_val = reset_sdio_on_unload;
		reset_sdio_on_unload = 1;
		status = hif_disable_func(device, func);
		reset_sdio_on_unload = old_reset_val;
		if (!device->is_suspend) {
			device->power_config = config;
			/* let the mmc core rescan and drop the card */
			mmc_detect_change(device->host, HZ / 3);
		}
		break;
	case HIF_DEVICE_POWER_UP:
		if (device->power_config == HIF_DEVICE_POWER_CUT) {
			if (device->is_suspend) {
				status = reinit_sdio(device);
				/* set power_config before EnableFunc to
				 * passthrough sdio r/w action when resuming
				 * from cut power
				 */
				device->power_config = config;
				if (status == QDF_STATUS_SUCCESS)
					status = hif_enable_func(device,
								 func);
			} else {
				/* device->func is bad pointer at this time */
				mmc_detect_change(device->host, 0);
				/* re-probe will complete the power up */
				return QDF_STATUS_E_PENDING;
			}
		} else if (device->power_config == HIF_DEVICE_POWER_DOWN) {
			int ret = sdio_enable4bits(device, 1);

			status = (ret == 0) ? QDF_STATUS_SUCCESS :
				 QDF_STATUS_E_FAILURE;
		}
		break;
	}
	/* record the state we ended up in (also for the default paths) */
	device->power_config = config;

	AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
			("%s:\n", __func__));

	return status;
}

#ifdef SDIO_3_0
/**
 * set_extended_mbox_size() - set extended MBOX size
 * @pinfo: sdio mailbox info
 *
 * Return: none.
1070 */ 1071 static void set_extended_mbox_size(struct hif_device_mbox_info *pinfo) 1072 { 1073 pinfo->mbox_prop[0].extended_size = 1074 HIF_MBOX0_EXTENDED_WIDTH_AR6320_ROME_2_0; 1075 pinfo->mbox_prop[1].extended_size = 1076 HIF_MBOX1_EXTENDED_WIDTH_AR6320; 1077 } 1078 1079 /** 1080 * set_extended_mbox_address() - set extended MBOX address 1081 * @pinfo: sdio mailbox info 1082 * 1083 * Return: none. 1084 */ 1085 static void set_extended_mbox_address(struct hif_device_mbox_info *pinfo) 1086 { 1087 pinfo->mbox_prop[1].extended_address = 1088 pinfo->mbox_prop[0].extended_address + 1089 pinfo->mbox_prop[0].extended_size + 1090 HIF_MBOX_DUMMY_SPACE_SIZE_AR6320; 1091 } 1092 #else 1093 static void set_extended_mbox_size(struct hif_device_mbox_info *pinfo) 1094 { 1095 pinfo->mbox_prop[0].extended_size = 1096 HIF_MBOX0_EXTENDED_WIDTH_AR6320; 1097 } 1098 static inline void 1099 set_extended_mbox_address(struct hif_device_mbox_info *pinfo) 1100 { 1101 1102 } 1103 #endif 1104 1105 /** 1106 * set_extended_mbox_window_info() - set extended MBOX window 1107 * information for SDIO interconnects 1108 * @manf_id: manufacturer id 1109 * @pinfo: sdio mailbox info 1110 * 1111 * Return: none. 
1112 */ 1113 static void set_extended_mbox_window_info(uint16_t manf_id, 1114 struct hif_device_mbox_info *pinfo) 1115 { 1116 switch (manf_id & MANUFACTURER_ID_AR6K_BASE_MASK) { 1117 case MANUFACTURER_ID_AR6002_BASE: 1118 /* MBOX 0 has an extended range */ 1119 1120 pinfo->mbox_prop[0].extended_address = 1121 HIF_MBOX0_EXTENDED_BASE_ADDR_AR6003_V1; 1122 pinfo->mbox_prop[0].extended_size = 1123 HIF_MBOX0_EXTENDED_WIDTH_AR6003_V1; 1124 1125 pinfo->mbox_prop[0].extended_address = 1126 HIF_MBOX0_EXTENDED_BASE_ADDR_AR6003_V1; 1127 pinfo->mbox_prop[0].extended_size = 1128 HIF_MBOX0_EXTENDED_WIDTH_AR6003_V1; 1129 1130 pinfo->mbox_prop[0].extended_address = 1131 HIF_MBOX0_EXTENDED_BASE_ADDR_AR6004; 1132 pinfo->mbox_prop[0].extended_size = 1133 HIF_MBOX0_EXTENDED_WIDTH_AR6004; 1134 1135 break; 1136 case MANUFACTURER_ID_AR6003_BASE: 1137 /* MBOX 0 has an extended range */ 1138 pinfo->mbox_prop[0].extended_address = 1139 HIF_MBOX0_EXTENDED_BASE_ADDR_AR6003_V1; 1140 pinfo->mbox_prop[0].extended_size = 1141 HIF_MBOX0_EXTENDED_WIDTH_AR6003_V1; 1142 pinfo->gmbox_address = HIF_GMBOX_BASE_ADDR; 1143 pinfo->gmbox_size = HIF_GMBOX_WIDTH; 1144 break; 1145 case MANUFACTURER_ID_AR6004_BASE: 1146 pinfo->mbox_prop[0].extended_address = 1147 HIF_MBOX0_EXTENDED_BASE_ADDR_AR6004; 1148 pinfo->mbox_prop[0].extended_size = 1149 HIF_MBOX0_EXTENDED_WIDTH_AR6004; 1150 pinfo->gmbox_address = HIF_GMBOX_BASE_ADDR; 1151 pinfo->gmbox_size = HIF_GMBOX_WIDTH; 1152 break; 1153 case MANUFACTURER_ID_AR6320_BASE: { 1154 uint16_t ManuRevID = 1155 manf_id & MANUFACTURER_ID_AR6K_REV_MASK; 1156 pinfo->mbox_prop[0].extended_address = 1157 HIF_MBOX0_EXTENDED_BASE_ADDR_AR6320; 1158 if (ManuRevID < 4) { 1159 pinfo->mbox_prop[0].extended_size = 1160 HIF_MBOX0_EXTENDED_WIDTH_AR6320; 1161 } else { 1162 /* from rome 2.0(0x504), the width has been extended to 56K */ 1163 set_extended_mbox_size(pinfo); 1164 } 1165 set_extended_mbox_address(pinfo); 1166 pinfo->gmbox_address = HIF_GMBOX_BASE_ADDR; 1167 pinfo->gmbox_size = 
HIF_GMBOX_WIDTH; 1168 break; 1169 } 1170 case MANUFACTURER_ID_QCA9377_BASE: 1171 case MANUFACTURER_ID_QCA9379_BASE: 1172 pinfo->mbox_prop[0].extended_address = 1173 HIF_MBOX0_EXTENDED_BASE_ADDR_AR6320; 1174 pinfo->mbox_prop[0].extended_size = 1175 HIF_MBOX0_EXTENDED_WIDTH_AR6320_ROME_2_0; 1176 pinfo->mbox_prop[1].extended_address = 1177 pinfo->mbox_prop[0].extended_address + 1178 pinfo->mbox_prop[0].extended_size + 1179 HIF_MBOX_DUMMY_SPACE_SIZE_AR6320; 1180 pinfo->mbox_prop[1].extended_size = 1181 HIF_MBOX1_EXTENDED_WIDTH_AR6320; 1182 pinfo->gmbox_address = HIF_GMBOX_BASE_ADDR; 1183 pinfo->gmbox_size = HIF_GMBOX_WIDTH; 1184 break; 1185 default: 1186 A_ASSERT(false); 1187 break; 1188 } 1189 } 1190 1191 /** 1192 * hif_configure_device() - configure sdio device 1193 * @device: pointer to hif device structure 1194 * @opcode: configuration type 1195 * @config: configuration value to set 1196 * @configLen: configuration length 1197 * 1198 * Return: 0 on success, error number otherwise. 1199 */ 1200 QDF_STATUS 1201 hif_configure_device(struct hif_sdio_dev *device, 1202 enum hif_device_config_opcode opcode, 1203 void *config, uint32_t config_len) 1204 { 1205 uint32_t count; 1206 QDF_STATUS status = QDF_STATUS_SUCCESS; 1207 1208 switch (opcode) { 1209 case HIF_DEVICE_GET_MBOX_BLOCK_SIZE: 1210 ((uint32_t *) config)[0] = HIF_MBOX0_BLOCK_SIZE; 1211 ((uint32_t *) config)[1] = HIF_MBOX1_BLOCK_SIZE; 1212 ((uint32_t *) config)[2] = HIF_MBOX2_BLOCK_SIZE; 1213 ((uint32_t *) config)[3] = HIF_MBOX3_BLOCK_SIZE; 1214 break; 1215 1216 case HIF_DEVICE_GET_MBOX_ADDR: 1217 for (count = 0; count < 4; count++) { 1218 ((uint32_t *) config)[count] = 1219 HIF_MBOX_START_ADDR(count); 1220 } 1221 1222 if (config_len >= sizeof(struct hif_device_mbox_info)) { 1223 set_extended_mbox_window_info((uint16_t) device->func-> 1224 device, 1225 (struct hif_device_mbox_info *) 1226 config); 1227 } 1228 1229 break; 1230 case HIF_DEVICE_GET_PENDING_EVENTS_FUNC: 1231 AR_DEBUG_PRINTF(ATH_DEBUG_WARN, 1232 ("%s: 
configuration opcode %d\n", 1233 __func__, opcode)); 1234 status = QDF_STATUS_E_FAILURE; 1235 break; 1236 case HIF_DEVICE_GET_IRQ_PROC_MODE: 1237 *((enum hif_device_irq_mode *) config) = 1238 HIF_DEVICE_IRQ_SYNC_ONLY; 1239 break; 1240 case HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC: 1241 AR_DEBUG_PRINTF(ATH_DEBUG_WARN, 1242 ("%s: configuration opcode %d\n", 1243 __func__, opcode)); 1244 status = QDF_STATUS_E_FAILURE; 1245 break; 1246 case HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT: 1247 if (!device->scatter_enabled) 1248 return QDF_STATUS_E_NOSUPPORT; 1249 status = 1250 setup_hif_scatter_support(device, 1251 (struct HIF_DEVICE_SCATTER_SUPPORT_INFO *) 1252 config); 1253 if (QDF_IS_STATUS_ERROR(status)) 1254 device->scatter_enabled = false; 1255 break; 1256 case HIF_DEVICE_GET_OS_DEVICE: 1257 /* pass back a pointer to the SDIO function's "dev" struct */ 1258 ((struct HIF_DEVICE_OS_DEVICE_INFO *) config)->os_dev = 1259 &device->func->dev; 1260 break; 1261 case HIF_DEVICE_POWER_STATE_CHANGE: 1262 status = 1263 power_state_change_notify(device, 1264 *(enum HIF_DEVICE_POWER_CHANGE_TYPE *) 1265 config); 1266 break; 1267 case HIF_DEVICE_GET_IRQ_YIELD_PARAMS: 1268 AR_DEBUG_PRINTF(ATH_DEBUG_WARN, 1269 ("%s: configuration opcode %d\n", 1270 __func__, opcode)); 1271 status = QDF_STATUS_E_FAILURE; 1272 break; 1273 case HIF_DEVICE_SET_HTC_CONTEXT: 1274 device->htc_context = config; 1275 break; 1276 case HIF_DEVICE_GET_HTC_CONTEXT: 1277 if (config == NULL) { 1278 AR_DEBUG_PRINTF(ATH_DEBUG_WARN, 1279 ("%s: htc context is NULL\n", 1280 __func__)); 1281 return QDF_STATUS_E_FAILURE; 1282 } 1283 *(void **)config = device->htc_context; 1284 break; 1285 case HIF_BMI_DONE: 1286 { 1287 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 1288 ("%s: BMI_DONE\n", __func__)); 1289 break; 1290 } 1291 default: 1292 AR_DEBUG_PRINTF(ATH_DEBUG_WARN, 1293 ("%s: Unsupported configuration opcode: %d\n", 1294 __func__, opcode)); 1295 status = QDF_STATUS_E_FAILURE; 1296 } 1297 1298 return status; 1299 } 1300 1301 /** 
1302 * hif_sdio_shutdown() - hif-sdio shutdown routine 1303 * @hif_ctx: pointer to hif_softc structore 1304 * 1305 * Return: None. 1306 */ 1307 void hif_sdio_shutdown(struct hif_softc *hif_ctx) 1308 { 1309 struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_ctx); 1310 struct hif_sdio_dev *hif_device = scn->hif_handle; 1311 1312 AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, 1313 ("%s: Enter\n", __func__)); 1314 if (hif_device != NULL) { 1315 AR_DEBUG_ASSERT(hif_device->power_config == HIF_DEVICE_POWER_CUT 1316 || hif_device->func != NULL); 1317 } else { 1318 int i; 1319 /* since we are unloading the driver anyways, 1320 * reset all cards in case the SDIO card is 1321 * externally powered and we are unloading the SDIO 1322 * stack. This avoids the problem when the SDIO stack 1323 * is reloaded and attempts are made to re-enumerate 1324 * a card that is already enumerated 1325 */ 1326 AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, 1327 ("%s: hif_shut_down_device, resetting\n", 1328 __func__)); 1329 reset_all_cards(); 1330 1331 /* Unregister with bus driver core */ 1332 if (registered) { 1333 registered = 0; 1334 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 1335 ("%s: Unregistering with the bus driver\n", 1336 __func__)); 1337 hif_sdio_remove_callbacks(); 1338 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 1339 ("%s: Unregistered!", 1340 __func__)); 1341 } 1342 1343 for (i = 0; i < MAX_HIF_DEVICES; ++i) { 1344 if (hif_devices[i] && hif_devices[i]->func == NULL) { 1345 AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, 1346 ("%s: Remove pending hif_device %pK\n", 1347 __func__, hif_devices[i])); 1348 del_hif_device(hif_devices[i]); 1349 hif_devices[i] = NULL; 1350 } 1351 } 1352 } 1353 AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, 1354 ("%s: Exit\n", __func__)); 1355 } 1356 1357 /** 1358 * hif_irq_handler() - hif-sdio interrupt handler 1359 * @func: pointer to sdio_func 1360 * 1361 * Return: None. 
1362 */ 1363 static void hif_irq_handler(struct sdio_func *func) 1364 { 1365 QDF_STATUS status; 1366 struct hif_sdio_dev *device; 1367 1368 AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, 1369 ("%s: Enter\n", __func__)); 1370 1371 device = get_hif_device(func); 1372 atomic_set(&device->irq_handling, 1); 1373 /* release the host during intr so we can use 1374 * it when we process cmds 1375 */ 1376 sdio_release_host(device->func); 1377 status = device->htc_callbacks.dsrHandler(device->htc_callbacks 1378 .context); 1379 sdio_claim_host(device->func); 1380 atomic_set(&device->irq_handling, 0); 1381 AR_DEBUG_ASSERT(status == QDF_STATUS_SUCCESS || 1382 status == QDF_STATUS_E_CANCELED); 1383 AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, 1384 ("%s: Exit\n", __func__)); 1385 } 1386 1387 /** 1388 * startup_task() - startup task to fill ol_softc 1389 * @param: pointer to struct hif_sdio_dev 1390 * 1391 * Return: 0 on success, error number otherwise. 1392 */ 1393 static int startup_task(void *param) 1394 { 1395 struct hif_sdio_dev *device; 1396 1397 device = (struct hif_sdio_dev *) param; 1398 AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, 1399 ("%s: call HTC from startup_task\n", 1400 __func__)); 1401 /* start up inform DRV layer */ 1402 if ((osdrv_callbacks. 
1403 device_inserted_handler(osdrv_callbacks.context, 1404 device)) != QDF_STATUS_SUCCESS) { 1405 AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, 1406 ("%s: Device rejected\n", __func__)); 1407 } 1408 1409 return 0; 1410 } 1411 1412 static int enable_task(void *param) 1413 { 1414 struct hif_sdio_dev *device; 1415 1416 device = (struct hif_sdio_dev *) param; 1417 AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, 1418 ("%s: call from resume_task\n", 1419 __func__)); 1420 1421 /* start up inform DRV layer */ 1422 if (device && 1423 device->claimed_ctx && 1424 osdrv_callbacks.device_power_change_handler && 1425 osdrv_callbacks.device_power_change_handler(device->claimed_ctx, 1426 HIF_DEVICE_POWER_UP) != 1427 QDF_STATUS_SUCCESS) { 1428 AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, 1429 ("%s: Device rejected\n", 1430 __func__)); 1431 } 1432 1433 return 0; 1434 } 1435 1436 /** 1437 * foce_drive_strength() - Set sdio drive strength 1438 * @func: pointer to sdio_func 1439 * 1440 * Return: none. 1441 */ 1442 static void foce_drive_strength(struct sdio_func *func) 1443 { 1444 unsigned int addr = SDIO_CCCR_DRIVE_STRENGTH; 1445 unsigned char value = 0; 1446 1447 uint32_t err = func0_cmd52_read_byte(func->card, 1448 addr, &value); 1449 if (err) { 1450 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 1451 ("%s: Read CCCR 0x%02X failed: %d\n", 1452 __func__, 1453 (unsigned int) addr, 1454 (unsigned int) err)); 1455 } else { 1456 value = (value & 1457 (~(SDIO_DRIVE_DTSx_MASK << 1458 SDIO_DRIVE_DTSx_SHIFT))) | 1459 SDIO_DTSx_SET_TYPE_D; 1460 err = func0_cmd52_write_byte(func->card, addr, 1461 value); 1462 if (err) { 1463 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 1464 ("%s: Write CCCR 0x%02X to 0x%02X failed: %d\n", 1465 __func__, 1466 (unsigned int) addr, 1467 (unsigned int) value, 1468 (unsigned int) err)); 1469 } else { 1470 addr = CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR; 1471 value = 0; 1472 err = func0_cmd52_read_byte(func->card, 1473 addr, &value); 1474 if (err) { 1475 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 1476 ("Read CCCR 0x%02X failed: %d\n", 1477 
(unsigned int) addr, 1478 (unsigned int) err)); 1479 } else { 1480 value = (value & 1481 (~CCCR_SDIO_DRIVER_STRENGTH_ENABLE_MASK) 1482 ) | 1483 CCCR_SDIO_DRIVER_STRENGTH_ENABLE_A | 1484 CCCR_SDIO_DRIVER_STRENGTH_ENABLE_C | 1485 CCCR_SDIO_DRIVER_STRENGTH_ENABLE_D; 1486 err = func0_cmd52_write_byte(func->card, 1487 addr, value); 1488 if (err) { 1489 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 1490 ("Write CCCR 0x%02X to 0x%02X failed: %d\n", 1491 (unsigned int) addr, 1492 (unsigned int) value, 1493 (unsigned int) err)); 1494 } 1495 } 1496 } 1497 } 1498 } 1499 1500 /** 1501 * write_cccr() - write CCCR 1502 * @func: pointer to sdio_func 1503 * 1504 * Return: none. 1505 */ 1506 static void write_cccr(struct sdio_func *func) 1507 { 1508 if (writecccr1) { 1509 uint32_t err = func0_cmd52_write_byte(func->card, 1510 writecccr1, 1511 writecccr1value); 1512 if (err) { 1513 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 1514 ("Write CCCR 0x%02X to 0x%02X failed: %d\n", 1515 (unsigned int)writecccr1, 1516 (unsigned int)writecccr1value, 1517 (unsigned int)err)); 1518 } else { 1519 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 1520 ("Write CCCR 0x%02X to 0x%02X OK\n", 1521 (unsigned int)writecccr1, 1522 (unsigned int)writecccr1value)); 1523 } 1524 } 1525 if (writecccr2) { 1526 uint32_t err = func0_cmd52_write_byte(func->card, 1527 writecccr2, 1528 writecccr2value); 1529 if (err) { 1530 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 1531 ("Write CCCR 0x%02X to 0x%02X failed: %d\n", 1532 (unsigned int)writecccr2, 1533 (unsigned int)writecccr2value, 1534 (unsigned int)err)); 1535 } else { 1536 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 1537 ("Write CCCR 0x%02X to 0x%02X OK\n", 1538 (unsigned int)writecccr2, 1539 (unsigned int)writecccr2value)); 1540 } 1541 } 1542 if (writecccr3) { 1543 uint32_t err = func0_cmd52_write_byte(func->card, 1544 writecccr3, 1545 writecccr3value); 1546 if (err) { 1547 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 1548 ("Write CCCR 0x%02X to 0x%02X failed: %d\n", 1549 (unsigned int)writecccr3, 1550 (unsigned int)writecccr3value, 
1551 (unsigned int)err)); 1552 } else { 1553 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 1554 ("Write CCCR 0x%02X to 0x%02X OK\n", 1555 (unsigned int)writecccr3, 1556 (unsigned int)writecccr3value)); 1557 } 1558 } 1559 if (writecccr4) { 1560 uint32_t err = func0_cmd52_write_byte(func->card, 1561 writecccr4, 1562 writecccr4value); 1563 if (err) 1564 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 1565 ("Write CCCR 0x%02X to 0x%02X failed: %d\n", 1566 (unsigned int)writecccr4, 1567 (unsigned int)writecccr4value, 1568 (unsigned int)err)); 1569 else 1570 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 1571 ("Write CCCR 0x%02X to 0x%02X OK\n", 1572 (unsigned int)writecccr4, 1573 (unsigned int)writecccr4value)); 1574 } 1575 } 1576 1577 #ifdef SDIO_BUS_WIDTH_8BIT 1578 static int hif_cmd52_write_byte_8bit(struct sdio_func *func) 1579 { 1580 return func0_cmd52_write_byte(func->card, SDIO_CCCR_IF, 1581 SDIO_BUS_CD_DISABLE | SDIO_BUS_WIDTH_8BIT); 1582 } 1583 #else 1584 static int hif_cmd52_write_byte_8bit(struct sdio_func *func) 1585 { 1586 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, 1587 ("%s: 8BIT Bus Width not supported\n", __func__)); 1588 return QDF_STATUS_E_FAILURE; 1589 } 1590 #endif 1591 1592 /** 1593 * hif_device_inserted() - hif-sdio driver probe handler 1594 * @func: pointer to sdio_func 1595 * @id: pointer to sdio_device_id 1596 * 1597 * Return: 0 on success, error number otherwise. 1598 */ 1599 static int hif_device_inserted(struct sdio_func *func, 1600 const struct sdio_device_id *id) 1601 { 1602 int i; 1603 int ret; 1604 struct hif_sdio_dev *device = NULL; 1605 int count; 1606 uint32_t clock, clock_set = 12500000; 1607 1608 AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, 1609 ("%s: Function: 0x%X, Vendor ID: 0x%X, Device ID: 0x%X, block size: 0x%X/0x%X\n", 1610 __func__, func->num, func->vendor, id->device, 1611 func->max_blksize, func->cur_blksize)); 1612 /* dma_mask should not be NULL, otherwise dma_map_single 1613 * will crash. 
TODO: check why dma_mask is NULL here 1614 */ 1615 if (func->dev.dma_mask == NULL) { 1616 static u64 dma_mask = 0xFFFFFFFF; 1617 1618 func->dev.dma_mask = &dma_mask; 1619 } 1620 for (i = 0; i < MAX_HIF_DEVICES; ++i) { 1621 struct hif_sdio_dev *hifdevice = hif_devices[i]; 1622 1623 if (hifdevice && hifdevice->power_config == HIF_DEVICE_POWER_CUT 1624 && hifdevice->host == func->card->host) { 1625 hifdevice->func = func; 1626 hifdevice->power_config = HIF_DEVICE_POWER_UP; 1627 sdio_set_drvdata(func, hifdevice); 1628 device = get_hif_device(func); 1629 1630 if (device->is_suspend) { 1631 AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, 1632 ("%s: Resume from suspend", 1633 __func__)); 1634 ret = reinit_sdio(device); 1635 } 1636 break; 1637 } 1638 } 1639 1640 if (device == NULL) { 1641 if (add_hif_device(func) == NULL) 1642 return QDF_STATUS_E_FAILURE; 1643 device = get_hif_device(func); 1644 1645 for (i = 0; i < MAX_HIF_DEVICES; ++i) { 1646 if (hif_devices[i] == NULL) { 1647 hif_devices[i] = device; 1648 break; 1649 } 1650 } 1651 if (i == MAX_HIF_DEVICES) { 1652 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 1653 ("%s: No more hif_devices[] slot for %pK", 1654 __func__, device)); 1655 } 1656 1657 device->id = id; 1658 device->host = func->card->host; 1659 device->is_disabled = true; 1660 /* TODO: MMC SDIO3.0 Setting should also be modified in ReInit() 1661 * function when Power Manage work. 
1662 */ 1663 sdio_claim_host(func); 1664 /* force driver strength to type D */ 1665 if (forcedriverstrength == 1) 1666 foce_drive_strength(func); 1667 write_cccr(func); 1668 /* Set MMC Clock */ 1669 if (mmcclock > 0) 1670 clock_set = mmcclock; 1671 if (sdio_card_highspeed(func->card)) 1672 clock = 50000000; 1673 else 1674 clock = func->card->cis.max_dtr; 1675 if (clock > device->host->f_max) 1676 clock = device->host->f_max; 1677 1678 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 1679 ("%s: Dumping clocks (%d,%d)\n", 1680 __func__, func->card->cis.max_dtr, 1681 device->host->f_max)); 1682 1683 /* only when mmcclock module parameter is specified, 1684 * set the clock explicitly 1685 */ 1686 if (mmcclock > 0) { 1687 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, 1688 ("Decrease host clock from %d to %d(%d,%d)\n", 1689 clock, clock_set, 1690 func->card->cis.max_dtr, 1691 device->host->f_max)); 1692 device->host->ios.clock = clock_set; 1693 device->host->ops->set_ios(device->host, 1694 &device->host->ios); 1695 } 1696 /* Set SDIO3.0 */ 1697 /* Set MMC Bus Width: 1-1Bit, 4-4Bit, 8-8Bit */ 1698 if (mmcbuswidth > 0) { 1699 if (mmcbuswidth == 1) { 1700 ret = 1701 func0_cmd52_write_byte(func->card, 1702 SDIO_CCCR_IF, 1703 SDIO_BUS_CD_DISABLE 1704 | 1705 SDIO_BUS_WIDTH_1BIT); 1706 if (ret) { 1707 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, 1708 ("%s: CMD52 to set bus width failed: %d\n", 1709 __func__, ret)); 1710 goto del_hif_dev;; 1711 } 1712 device->host->ios.bus_width = 1713 MMC_BUS_WIDTH_1; 1714 device->host->ops->set_ios(device->host, 1715 &device-> 1716 host->ios); 1717 } else if (mmcbuswidth == 4 1718 && (device->host-> 1719 caps & MMC_CAP_4_BIT_DATA)) { 1720 ret = 1721 func0_cmd52_write_byte(func->card, 1722 SDIO_CCCR_IF, 1723 SDIO_BUS_CD_DISABLE 1724 | 1725 SDIO_BUS_WIDTH_4BIT); 1726 if (ret) { 1727 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, 1728 ("%s: CMD52 to bus width failed: %d\n", 1729 __func__, 1730 ret)); 1731 goto del_hif_dev; 1732 } 1733 device->host->ios.bus_width = 1734 MMC_BUS_WIDTH_4; 1735 
device->host->ops->set_ios(device->host, 1736 &device-> 1737 host->ios); 1738 } else if (mmcbuswidth == 8 1739 && (device->host-> 1740 caps & MMC_CAP_8_BIT_DATA)) { 1741 ret = hif_cmd52_write_byte_8bit(func); 1742 if (ret) { 1743 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, 1744 ("%s: CMD52 to bus width failed: %d\n", 1745 __func__, 1746 ret)); 1747 goto del_hif_dev; 1748 } 1749 device->host->ios.bus_width = 1750 MMC_BUS_WIDTH_8; 1751 device->host->ops->set_ios(device->host, 1752 &device-> 1753 host->ios); 1754 } else { 1755 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, 1756 ("%s: MMC bus width %d is not supported.\n", 1757 __func__, 1758 mmcbuswidth)); 1759 ret = QDF_STATUS_E_FAILURE; 1760 goto del_hif_dev; 1761 } 1762 AR_DEBUG_PRINTF(ATH_DEBUG_ANY, 1763 ("%s: Set MMC bus width to %dBit.\n", 1764 __func__, mmcbuswidth)); 1765 } 1766 if (debugcccr) 1767 hif_dump_cccr(device); 1768 1769 sdio_release_host(func); 1770 } 1771 1772 qdf_spinlock_create(&device->lock); 1773 1774 qdf_spinlock_create(&device->asynclock); 1775 1776 DL_LIST_INIT(&device->scatter_req_head); 1777 1778 if (!nohifscattersupport) { 1779 /* try to allow scatter operation on all instances, 1780 * unless globally overridden 1781 */ 1782 device->scatter_enabled = true; 1783 } else 1784 device->scatter_enabled = false; 1785 1786 /* Initialize the bus requests to be used later */ 1787 qdf_mem_zero(device->bus_request, sizeof(device->bus_request)); 1788 for (count = 0; count < BUS_REQUEST_MAX_NUM; count++) { 1789 sema_init(&device->bus_request[count].sem_req, 0); 1790 hif_free_bus_request(device, &device->bus_request[count]); 1791 } 1792 sema_init(&device->sem_async, 0); 1793 1794 ret = hif_enable_func(device, func); 1795 if ((ret == QDF_STATUS_SUCCESS || ret == QDF_STATUS_E_PENDING)) 1796 return 0; 1797 ret = QDF_STATUS_E_FAILURE; 1798 del_hif_dev: 1799 del_hif_device(device); 1800 for (i = 0; i < MAX_HIF_DEVICES; ++i) { 1801 if (hif_devices[i] == device) { 1802 hif_devices[i] = NULL; 1803 break; 1804 } 1805 } 1806 if (i == 
MAX_HIF_DEVICES) { 1807 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 1808 ("%s: No hif_devices[] slot for %pK", 1809 __func__, device)); 1810 } 1811 return ret; 1812 } 1813 1814 /** 1815 * hif_ack_interrupt() - Acknowledge hif device irq 1816 * @device: pointer to struct hif_sdio_dev 1817 * 1818 * This should translate to an acknowledgment to the bus driver indicating that 1819 * the previous interrupt request has been serviced and the all the relevant 1820 * sources have been cleared. HTC is ready to process more interrupts. 1821 * This should prevent the bus driver from raising an interrupt unless the 1822 * previous one has been serviced and acknowledged using the previous API. 1823 * 1824 * Return: None. 1825 */ 1826 void hif_ack_interrupt(struct hif_sdio_dev *device) 1827 { 1828 AR_DEBUG_ASSERT(device != NULL); 1829 1830 /* Acknowledge our function IRQ */ 1831 } 1832 1833 /** 1834 * hif_un_mask_interrupt() - Re-enable hif device irq 1835 * @device: pointer to struct hif_sdio_dev 1836 * 1837 * 1838 * Return: None. 1839 */ 1840 void hif_un_mask_interrupt(struct hif_sdio_dev *device) 1841 { 1842 int ret; 1843 1844 AR_DEBUG_ASSERT(device != NULL); 1845 AR_DEBUG_ASSERT(device->func != NULL); 1846 1847 HIF_ENTER(); 1848 /* 1849 * On HP Elitebook 8460P, interrupt mode is not stable 1850 * in high throughput, so polling method should be used 1851 * instead of interrupt mode. 1852 */ 1853 if (brokenirq) { 1854 AR_DEBUG_PRINTF(ATH_DEBUG_INFO, 1855 ("%s: Using broken IRQ mode\n", 1856 __func__)); 1857 /* disable IRQ support even the capability exists */ 1858 device->func->card->host->caps &= ~MMC_CAP_SDIO_IRQ; 1859 } 1860 /* Register the IRQ Handler */ 1861 sdio_claim_host(device->func); 1862 ret = sdio_claim_irq(device->func, hif_irq_handler); 1863 sdio_release_host(device->func); 1864 AR_DEBUG_ASSERT(ret == 0); 1865 HIF_EXIT(); 1866 } 1867 1868 /** 1869 * hif_mask_interrupt() - Disable hif device irq 1870 * @device: pointer to struct hif_sdio_dev 1871 * 1872 * 1873 * Return: None. 
1874 */ 1875 void hif_mask_interrupt(struct hif_sdio_dev *device) 1876 { 1877 int ret; 1878 1879 AR_DEBUG_ASSERT(device != NULL); 1880 AR_DEBUG_ASSERT(device->func != NULL); 1881 1882 HIF_ENTER(); 1883 1884 /* Mask our function IRQ */ 1885 sdio_claim_host(device->func); 1886 while (atomic_read(&device->irq_handling)) { 1887 sdio_release_host(device->func); 1888 schedule_timeout_interruptible(HZ / 10); 1889 sdio_claim_host(device->func); 1890 } 1891 ret = sdio_release_irq(device->func); 1892 sdio_release_host(device->func); 1893 if (ret) { 1894 if (ret == -ETIMEDOUT) { 1895 AR_DEBUG_PRINTF(ATH_DEBUG_WARN, 1896 ("%s: Timeout to mask interrupt\n", 1897 __func__)); 1898 } else { 1899 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 1900 ("%s: Unable to mask interrupt %d\n", 1901 __func__, ret)); 1902 AR_DEBUG_ASSERT(ret == 0); 1903 } 1904 } 1905 HIF_EXIT(); 1906 } 1907 1908 /** 1909 * hif_allocate_bus_request() - Allocate hif bus request 1910 * @device: pointer to struct hif_sdio_dev 1911 * 1912 * 1913 * Return: pointer to struct bus_request structure. 1914 */ 1915 struct bus_request *hif_allocate_bus_request(struct hif_sdio_dev *device) 1916 { 1917 struct bus_request *busrequest; 1918 1919 qdf_spin_lock_irqsave(&device->lock); 1920 busrequest = device->bus_request_free_queue; 1921 /* Remove first in list */ 1922 if (busrequest != NULL) 1923 device->bus_request_free_queue = busrequest->next; 1924 1925 /* Release lock */ 1926 qdf_spin_unlock_irqrestore(&device->lock); 1927 AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, 1928 ("%s: hif_allocate_bus_request: 0x%pK\n", 1929 __func__, busrequest)); 1930 1931 return busrequest; 1932 } 1933 1934 /** 1935 * hif_free_bus_request() - Free hif bus request 1936 * @device: pointer to struct hif_sdio_dev 1937 * 1938 * 1939 * Return: None. 
1940 */ 1941 void hif_free_bus_request(struct hif_sdio_dev *device, 1942 struct bus_request *busrequest) 1943 { 1944 AR_DEBUG_ASSERT(busrequest != NULL); 1945 /* Acquire lock */ 1946 qdf_spin_lock_irqsave(&device->lock); 1947 1948 /* Insert first in list */ 1949 busrequest->next = device->bus_request_free_queue; 1950 busrequest->inusenext = NULL; 1951 device->bus_request_free_queue = busrequest; 1952 1953 /* Release lock */ 1954 qdf_spin_unlock_irqrestore(&device->lock); 1955 } 1956 1957 static QDF_STATUS hif_disable_func(struct hif_sdio_dev *device, 1958 struct sdio_func *func) 1959 { 1960 int ret; 1961 QDF_STATUS status = QDF_STATUS_SUCCESS; 1962 1963 HIF_ENTER(); 1964 device = get_hif_device(func); 1965 if (!IS_ERR(device->async_task)) { 1966 init_completion(&device->async_completion); 1967 device->async_shutdown = 1; 1968 up(&device->sem_async); 1969 wait_for_completion(&device->async_completion); 1970 device->async_task = NULL; 1971 sema_init(&device->sem_async, 0); 1972 } 1973 /* Disable the card */ 1974 sdio_claim_host(device->func); 1975 ret = sdio_disable_func(device->func); 1976 if (ret) 1977 status = QDF_STATUS_E_FAILURE; 1978 1979 if (reset_sdio_on_unload && status == QDF_STATUS_SUCCESS) { 1980 /* reset the SDIO interface. It's useful in automated testing 1981 * where the card does not need to be removed at the end 1982 * of the test. 
It is expected that the user will also 1983 * un/reload the host controller driver to force the bus 1984 * driver to re-enumerate the slot 1985 */ 1986 AR_DEBUG_PRINTF(ATH_DEBUG_WARN, 1987 ("%s: reseting SDIO card", 1988 __func__)); 1989 1990 /* sdio_f0_writeb() cannot be used here, this allows access 1991 * to undefined registers in the range of: 0xF0-0xFF 1992 */ 1993 1994 ret = 1995 func0_cmd52_write_byte(device->func->card, 1996 SDIO_CCCR_ABORT, 1997 (1 << 3)); 1998 if (ret) { 1999 status = QDF_STATUS_E_FAILURE; 2000 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, 2001 ("%s: reset failed : %d\n", 2002 __func__, ret)); 2003 } 2004 } 2005 2006 sdio_release_host(device->func); 2007 2008 if (status == QDF_STATUS_SUCCESS) 2009 device->is_disabled = true; 2010 cleanup_hif_scatter_resources(device); 2011 2012 HIF_EXIT(); 2013 2014 return status; 2015 } 2016 2017 static QDF_STATUS hif_enable_func(struct hif_sdio_dev *device, 2018 struct sdio_func *func) 2019 { 2020 struct task_struct *task; 2021 const char *task_name = NULL; 2022 int (*taskFunc)(void *) = NULL; 2023 int ret = QDF_STATUS_SUCCESS; 2024 2025 HIF_ENTER("sdio_func 0x%pK", func); 2026 2027 device = get_hif_device(func); 2028 2029 if (!device) { 2030 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("HIF device is NULL\n")); 2031 return QDF_STATUS_E_INVAL; 2032 } 2033 2034 if (device->is_disabled) { 2035 int setAsyncIRQ = 0; 2036 __u16 manufacturer_id = 2037 device->id->device & MANUFACTURER_ID_AR6K_BASE_MASK; 2038 /* enable the SDIO function */ 2039 sdio_claim_host(func); 2040 /* enable 4-bit ASYNC interrupt on AR6003x or later devices */ 2041 if (manufacturer_id == MANUFACTURER_ID_AR6003_BASE) { 2042 setAsyncIRQ = 1; 2043 ret = 2044 func0_cmd52_write_byte(func->card, 2045 CCCR_SDIO_IRQ_MODE_REG_AR6003, 2046 SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_AR6003); 2047 } else if (manufacturer_id == MANUFACTURER_ID_AR6320_BASE || 2048 manufacturer_id == MANUFACTURER_ID_QCA9377_BASE || 2049 manufacturer_id == MANUFACTURER_ID_QCA9379_BASE) { 2050 unsigned char 
data = 0; 2051 2052 setAsyncIRQ = 1; 2053 ret = 2054 func0_cmd52_read_byte(func->card, 2055 CCCR_SDIO_IRQ_MODE_REG_AR6320, 2056 &data); 2057 if (ret) { 2058 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, 2059 ("%s: failed to read irq reg %d\n", 2060 __func__, ret)); 2061 sdio_release_host(func); 2062 return QDF_STATUS_E_FAILURE; 2063 } 2064 data |= SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_AR6320; 2065 ret = 2066 func0_cmd52_write_byte(func->card, 2067 CCCR_SDIO_IRQ_MODE_REG_AR6320, 2068 data); 2069 } 2070 if (setAsyncIRQ) { 2071 if (ret) { 2072 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, 2073 ("%s: failed to enable ASYNC IRQ mode %d\n", 2074 __func__, ret)); 2075 sdio_release_host(func); 2076 return QDF_STATUS_E_FAILURE; 2077 } 2078 AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, 2079 ("%s: 4-bit ASYNC IRQ mode enabled\n", 2080 __func__)); 2081 } 2082 2083 /* set CCCR 0xF0[7:6] to increase async interrupt delay clock to 2084 * fix interrupt missing issue on dell 8460p 2085 */ 2086 if (asyncintdelay != 0) { 2087 unsigned char data = 0; 2088 2089 ret = func0_cmd52_read_byte(func->card, 2090 CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS, 2091 &data); 2092 if (ret) { 2093 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, 2094 ("%s: failed to read CCCR %d, val is %d\n", 2095 __func__, 2096 CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS, 2097 ret)); 2098 sdio_release_host(func); 2099 return QDF_STATUS_E_FAILURE; 2100 } 2101 data = (data & ~CCCR_SDIO_ASYNC_INT_DELAY_MASK) | 2102 ((asyncintdelay << 2103 CCCR_SDIO_ASYNC_INT_DELAY_LSB) & 2104 CCCR_SDIO_ASYNC_INT_DELAY_MASK); 2105 ret = 2106 func0_cmd52_write_byte(func->card, 2107 CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS, 2108 data); 2109 if (ret) { 2110 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, 2111 ("%s: failed to write CCCR %d, val is %d\n", 2112 __func__, 2113 CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS, 2114 ret)); 2115 sdio_release_host(func); 2116 return QDF_STATUS_E_FAILURE; 2117 } 2118 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 2119 ("%s: Set async interrupt delay clock as %d.\n", 2120 __func__, 2121 asyncintdelay)); 2122 } 2123 /* give us some time to 
enable, in ms */ 2124 func->enable_timeout = 100; 2125 ret = sdio_enable_func(func); 2126 if (ret) { 2127 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 2128 ("%s: Unable to enable AR6K: 0x%X\n", 2129 __func__, ret)); 2130 sdio_release_host(func); 2131 return QDF_STATUS_E_FAILURE; 2132 } 2133 ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE); 2134 2135 if (modstrength) { 2136 unsigned int address = WINDOW_DATA_ADDRESS; 2137 unsigned int value = 0x0FFF; 2138 2139 ret = sdio_memcpy_toio(device->func, address, 2140 &value, 4); 2141 if (ret) { 2142 AR_DEBUG_PRINTF(ATH_DEBUG_INFO, 2143 ("memcpy_toio 0x%x 0x%x error:%d\n", 2144 address, value, ret)); 2145 } else { 2146 AR_DEBUG_PRINTF(ATH_DEBUG_INFO, 2147 ("memcpy_toio, 0x%x 0x%x OK\n", address, 2148 value)); 2149 address = WINDOW_WRITE_ADDR_ADDRESS; 2150 value = 0x50F8; 2151 ret = 2152 sdio_memcpy_toio(device->func, address, 2153 &value, 4); 2154 if (ret) 2155 AR_DEBUG_PRINTF(ATH_DEBUG_INFO, 2156 ("memcpy_toio 0x%x 0x%x error:%d\n", 2157 address, value, ret)); 2158 else 2159 AR_DEBUG_PRINTF(ATH_DEBUG_INFO, 2160 ("memcpy_toio, 0x%x 0x%x OK\n", 2161 address, value)); 2162 } 2163 }; 2164 sdio_release_host(func); 2165 if (ret) { 2166 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 2167 ("%s: can't set block size 0x%x AR6K: 0x%X\n", 2168 __func__, HIF_MBOX_BLOCK_SIZE, 2169 ret)); 2170 return QDF_STATUS_E_FAILURE; 2171 } 2172 device->is_disabled = false; 2173 /* create async I/O thread */ 2174 if (!device->async_task) { 2175 device->async_shutdown = 0; 2176 device->async_task = kthread_create(async_task, 2177 (void *)device, 2178 "AR6K Async"); 2179 if (IS_ERR(device->async_task)) { 2180 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 2181 ("%s: to create async task\n", 2182 __func__)); 2183 return QDF_STATUS_E_FAILURE; 2184 } 2185 AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, 2186 ("%s: start async task\n", 2187 __func__)); 2188 wake_up_process(device->async_task); 2189 } 2190 } 2191 2192 if (!device->claimed_ctx) { 2193 taskFunc = startup_task; 2194 task_name = "AR6K startup"; 
2195 ret = QDF_STATUS_SUCCESS; 2196 } else { 2197 taskFunc = enable_task; 2198 task_name = "AR6K enable"; 2199 ret = QDF_STATUS_E_PENDING; 2200 } 2201 /* create resume thread */ 2202 task = kthread_create(taskFunc, (void *)device, task_name); 2203 if (IS_ERR(task)) { 2204 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 2205 ("%s: to create enabel task\n", 2206 __func__)); 2207 return QDF_STATUS_E_FAILURE; 2208 } 2209 wake_up_process(task); 2210 2211 /* task will call the enable func, indicate pending */ 2212 HIF_EXIT(); 2213 2214 return ret; 2215 } 2216 2217 int hif_device_suspend(struct device *dev) 2218 { 2219 struct sdio_func *func = dev_to_sdio_func(dev); 2220 QDF_STATUS status = QDF_STATUS_SUCCESS; 2221 int ret = QDF_STATUS_SUCCESS; 2222 #if defined(MMC_PM_KEEP_POWER) 2223 mmc_pm_flag_t pm_flag = 0; 2224 enum HIF_DEVICE_POWER_CHANGE_TYPE config; 2225 struct mmc_host *host = NULL; 2226 #endif 2227 2228 struct hif_sdio_dev *device = get_hif_device(func); 2229 2230 #if defined(MMC_PM_KEEP_POWER) 2231 if (device && device->func) 2232 host = device->func->card->host; 2233 #endif 2234 2235 HIF_ENTER(); 2236 if (device && device->claimed_ctx 2237 && osdrv_callbacks.device_suspend_handler) { 2238 device->is_suspend = true; 2239 status = osdrv_callbacks.device_suspend_handler( 2240 device->claimed_ctx); 2241 #if defined(MMC_PM_KEEP_POWER) 2242 switch (forcesleepmode) { 2243 case 0: /* depend on sdio host pm capbility */ 2244 pm_flag = sdio_get_host_pm_caps(func); 2245 break; 2246 case 1: /* force WOW */ 2247 pm_flag |= MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ; 2248 break; 2249 case 2: /* force DeepSleep */ 2250 pm_flag &= ~MMC_PM_WAKE_SDIO_IRQ; 2251 pm_flag |= MMC_PM_KEEP_POWER; 2252 break; 2253 case 3: /* force CutPower */ 2254 pm_flag &= 2255 ~(MMC_PM_WAKE_SDIO_IRQ | MMC_PM_WAKE_SDIO_IRQ); 2256 break; 2257 } 2258 if (!(pm_flag & MMC_PM_KEEP_POWER)) { 2259 /* cut power support */ 2260 /* setting power_config before hif_configure_device to 2261 * skip sdio r/w when suspending with 
cut power 2262 */ 2263 AR_DEBUG_PRINTF(ATH_DEBUG_INFO, 2264 ("hif_device_suspend: cut power enter\n")); 2265 config = HIF_DEVICE_POWER_CUT; 2266 device->power_config = config; 2267 if ((device->claimed_ctx != NULL) 2268 && osdrv_callbacks.device_removed_handler) { 2269 status = osdrv_callbacks. 2270 device_removed_handler(device-> 2271 claimed_ctx, 2272 device); 2273 } 2274 ret = hif_configure_device(device, 2275 HIF_DEVICE_POWER_STATE_CHANGE, 2276 &config, 2277 sizeof 2278 (enum HIF_DEVICE_POWER_CHANGE_TYPE)); 2279 if (ret) { 2280 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 2281 ("%s: hif config device failed: %d\n", 2282 __func__, ret)); 2283 return ret; 2284 } 2285 2286 hif_mask_interrupt(device); 2287 device->device_state = HIF_DEVICE_STATE_CUTPOWER; 2288 AR_DEBUG_PRINTF(ATH_DEBUG_INFO, 2289 ("hif_device_suspend: cut power success\n")); 2290 return ret; 2291 } 2292 ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); 2293 if (ret) { 2294 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 2295 ("%s: set sdio pm flags failed %d\n", 2296 __func__, ret)); 2297 return ret; 2298 } 2299 2300 /* TODO:WOW support */ 2301 if (pm_flag & MMC_PM_WAKE_SDIO_IRQ) { 2302 AR_DEBUG_PRINTF(ATH_DEBUG_INFO, 2303 ("hif_device_suspend: wow enter\n")); 2304 config = HIF_DEVICE_POWER_DOWN; 2305 ret = hif_configure_device(device, 2306 HIF_DEVICE_POWER_STATE_CHANGE, 2307 &config, 2308 sizeof 2309 (enum HIF_DEVICE_POWER_CHANGE_TYPE)); 2310 2311 if (ret) { 2312 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 2313 ("%s: hif config dev failed: %d\n", 2314 __func__, ret)); 2315 return ret; 2316 } 2317 ret = sdio_set_host_pm_flags(func, 2318 MMC_PM_WAKE_SDIO_IRQ); 2319 if (ret) { 2320 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 2321 ("%s: set sdio pm flags %d\n", 2322 __func__, ret)); 2323 return ret; 2324 } 2325 hif_mask_interrupt(device); 2326 device->device_state = HIF_DEVICE_STATE_WOW; 2327 AR_DEBUG_PRINTF(ATH_DEBUG_INFO, 2328 ("hif_device_suspend: wow success\n")); 2329 return ret; 2330 } 2331 /* deep sleep support */ 2332 
AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: deep sleep enter\n", 2333 __func__)); 2334 2335 /* 2336 * Wait for some async clean handler finished. 2337 * These handlers are part of vdev disconnect. 2338 * As handlers are async,sleep is not suggested, 2339 * some blocking method may be a good choice. 2340 * But before adding callback function to these 2341 * handler, sleep wait is a simple method. 2342 */ 2343 msleep(100); 2344 hif_mask_interrupt(device); 2345 device->device_state = HIF_DEVICE_STATE_DEEPSLEEP; 2346 AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: deep sleep done\n", 2347 __func__)); 2348 return ret; 2349 #endif 2350 } 2351 2352 HIF_EXIT(); 2353 2354 switch (status) { 2355 case QDF_STATUS_SUCCESS: 2356 #if defined(MMC_PM_KEEP_POWER) 2357 if (host) { 2358 host->pm_flags &= 2359 ~(MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ); 2360 } 2361 #endif 2362 return 0; 2363 case QDF_STATUS_E_BUSY: 2364 #if defined(MMC_PM_KEEP_POWER) 2365 if (host) { 2366 /* WAKE_SDIO_IRQ in order to wake up by DAT1 */ 2367 host->pm_flags |= 2368 (MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ); 2369 host->pm_flags &= host->pm_caps; 2370 } 2371 return 0; 2372 #else 2373 return -EBUSY; /* Hack to support deep sleep and wow */ 2374 #endif 2375 default: 2376 device->is_suspend = false; 2377 2378 return QDF_STATUS_E_FAILURE; 2379 } 2380 } 2381 2382 int hif_device_resume(struct device *dev) 2383 { 2384 struct sdio_func *func = dev_to_sdio_func(dev); 2385 QDF_STATUS status = QDF_STATUS_SUCCESS; 2386 enum HIF_DEVICE_POWER_CHANGE_TYPE config; 2387 struct hif_sdio_dev *device; 2388 2389 device = get_hif_device(func); 2390 if (!device) { 2391 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("get hif device failed\n")); 2392 return QDF_STATUS_E_FAILURE; 2393 } 2394 2395 if (device->device_state == HIF_DEVICE_STATE_CUTPOWER) { 2396 config = HIF_DEVICE_POWER_UP; 2397 status = hif_configure_device(device, 2398 HIF_DEVICE_POWER_STATE_CHANGE, 2399 &config, 2400 sizeof(enum 2401 HIF_DEVICE_POWER_CHANGE_TYPE)); 2402 if (status) { 2403 
AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 2404 ("%s: hif_configure_device failed\n", 2405 __func__)); 2406 return status; 2407 } 2408 } else if (device->device_state == HIF_DEVICE_STATE_DEEPSLEEP) { 2409 hif_un_mask_interrupt(device); 2410 } else if (device->device_state == HIF_DEVICE_STATE_WOW) { 2411 /*TODO:WOW support */ 2412 hif_un_mask_interrupt(device); 2413 } 2414 2415 /* 2416 * device_resume_handler do nothing now. If some operation 2417 * should be added to this handler in power cut 2418 * resume flow, do make sure those operation is not 2419 * depent on what startup_task has done,or the resume 2420 * flow will block. 2421 */ 2422 AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, 2423 ("%s: +hif_device_resume\n", 2424 __func__)); 2425 if (device->claimed_ctx 2426 && osdrv_callbacks.device_suspend_handler) { 2427 status = 2428 osdrv_callbacks.device_resume_handler(device->claimed_ctx); 2429 device->is_suspend = false; 2430 } 2431 AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, 2432 ("%s: -hif_device_resume\n", 2433 __func__)); 2434 device->device_state = HIF_DEVICE_STATE_ON; 2435 2436 return QDF_IS_STATUS_SUCCESS(status) ? 
0 : status; 2437 } 2438 2439 static void hif_device_removed(struct sdio_func *func) 2440 { 2441 QDF_STATUS status = QDF_STATUS_SUCCESS; 2442 struct hif_sdio_dev *device; 2443 int i; 2444 2445 AR_DEBUG_ASSERT(func != NULL); 2446 HIF_ENTER(); 2447 device = get_hif_device(func); 2448 2449 if (device->power_config == HIF_DEVICE_POWER_CUT) { 2450 device->func = NULL; /* func will be free by mmc stack */ 2451 return; /* Just return for cut-off mode */ 2452 } 2453 for (i = 0; i < MAX_HIF_DEVICES; ++i) { 2454 if (hif_devices[i] == device) 2455 hif_devices[i] = NULL; 2456 } 2457 2458 if (device->claimed_ctx != NULL) 2459 status = 2460 osdrv_callbacks.device_removed_handler(device->claimed_ctx, 2461 device); 2462 2463 hif_mask_interrupt(device); 2464 2465 if (device->is_disabled) 2466 device->is_disabled = false; 2467 else 2468 status = hif_disable_func(device, func); 2469 2470 2471 del_hif_device(device); 2472 if (status != QDF_STATUS_SUCCESS) 2473 AR_DEBUG_PRINTF(ATH_DEBUG_WARN, 2474 ("%s: Unable to disable sdio func\n", 2475 __func__)); 2476 2477 HIF_EXIT(); 2478 } 2479 2480 /* 2481 * This should be moved to AR6K HTC layer. 2482 */ 2483 QDF_STATUS hif_wait_for_pending_recv(struct hif_sdio_dev *device) 2484 { 2485 int32_t cnt = 10; 2486 uint8_t host_int_status; 2487 QDF_STATUS status = QDF_STATUS_SUCCESS; 2488 2489 do { 2490 while (atomic_read(&device->irq_handling)) { 2491 /* wait until irq handler finished all the jobs */ 2492 schedule_timeout_interruptible(HZ / 10); 2493 } 2494 /* check if there is any pending irq due to force done */ 2495 host_int_status = 0; 2496 status = hif_read_write(device, HOST_INT_STATUS_ADDRESS, 2497 (uint8_t *) &host_int_status, 2498 sizeof(host_int_status), 2499 HIF_RD_SYNC_BYTE_INC, NULL); 2500 host_int_status = 2501 QDF_IS_STATUS_SUCCESS(status) ? 
2502 (host_int_status & (1 << 0)) : 0; 2503 if (host_int_status) 2504 /* wait until irq handler finishs its job */ 2505 schedule_timeout_interruptible(1); 2506 } while (host_int_status && --cnt > 0); 2507 2508 if (host_int_status && cnt == 0) 2509 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 2510 ("%s: Unable clear up pending IRQ\n", 2511 __func__)); 2512 2513 return QDF_STATUS_SUCCESS; 2514 } 2515 2516 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0)) && \ 2517 !defined(WITH_BACKPORTS) 2518 /** 2519 * hif_sdio_set_drvdata() - set driver data 2520 * @func: pointer to sdio function 2521 * @hifdevice: pointer to hif device 2522 * 2523 * Return: non zero for success. 2524 */ 2525 static inline int hif_sdio_set_drvdata(struct sdio_func *func, 2526 struct hif_sdio_dev *hifdevice) 2527 { 2528 return sdio_set_drvdata(func, hifdevice); 2529 } 2530 #else 2531 static inline int hif_sdio_set_drvdata(struct sdio_func *func, 2532 struct hif_sdio_dev *hifdevice) 2533 { 2534 sdio_set_drvdata(func, hifdevice); 2535 return 0; 2536 } 2537 #endif 2538 2539 static struct hif_sdio_dev *add_hif_device(struct sdio_func *func) 2540 { 2541 struct hif_sdio_dev *hifdevice = NULL; 2542 int ret = 0; 2543 2544 HIF_ENTER(); 2545 AR_DEBUG_ASSERT(func != NULL); 2546 hifdevice = (struct hif_sdio_dev *) qdf_mem_malloc(sizeof( 2547 struct hif_sdio_dev)); 2548 AR_DEBUG_ASSERT(hifdevice != NULL); 2549 if (hifdevice == NULL) { 2550 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("Alloc hif device fail\n")); 2551 return NULL; 2552 } 2553 #if HIF_USE_DMA_BOUNCE_BUFFER 2554 hifdevice->dma_buffer = qdf_mem_malloc(HIF_DMA_BUFFER_SIZE); 2555 AR_DEBUG_ASSERT(hifdevice->dma_buffer != NULL); 2556 if (hifdevice->dma_buffer == NULL) { 2557 qdf_mem_free(hifdevice); 2558 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("Alloc dma buffer fail\n")); 2559 return NULL; 2560 } 2561 #endif 2562 hifdevice->func = func; 2563 hifdevice->power_config = HIF_DEVICE_POWER_UP; 2564 hifdevice->device_state = HIF_DEVICE_STATE_ON; 2565 ret = hif_sdio_set_drvdata(func, 
hifdevice); 2566 HIF_EXIT("status %d", ret); 2567 2568 return hifdevice; 2569 } 2570 2571 static struct hif_sdio_dev *get_hif_device(struct sdio_func *func) 2572 { 2573 AR_DEBUG_ASSERT(func != NULL); 2574 2575 return (struct hif_sdio_dev *) sdio_get_drvdata(func); 2576 } 2577 2578 static void del_hif_device(struct hif_sdio_dev *device) 2579 { 2580 AR_DEBUG_ASSERT(device != NULL); 2581 AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, 2582 ("%s: deleting hif device 0x%pK\n", 2583 __func__, device)); 2584 if (device->dma_buffer != NULL) 2585 qdf_mem_free(device->dma_buffer); 2586 2587 qdf_mem_free(device); 2588 } 2589 2590 static void reset_all_cards(void) 2591 { 2592 } 2593 2594 QDF_STATUS hif_attach_htc(struct hif_sdio_dev *device, 2595 struct htc_callbacks *callbacks) 2596 { 2597 if (device->htc_callbacks.context != NULL) 2598 /* already in use! */ 2599 return QDF_STATUS_E_FAILURE; 2600 device->htc_callbacks = *callbacks; 2601 2602 return QDF_STATUS_SUCCESS; 2603 } 2604 2605 void hif_detach_htc(struct hif_opaque_softc *hif_ctx) 2606 { 2607 struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_ctx); 2608 struct hif_sdio_dev *hif_device = scn->hif_handle; 2609 2610 qdf_mem_zero(&hif_device->htc_callbacks, 2611 sizeof(hif_device->htc_callbacks)); 2612 } 2613 2614 #define SDIO_SET_CMD52_ARG(arg, rw, func, raw, address, writedata) \ 2615 ((arg) = (((rw) & 1) << 31) | \ 2616 ((func & 0x7) << 28) | \ 2617 (((raw) & 1) << 27) | \ 2618 (1 << 26) | \ 2619 (((address) & 0x1FFFF) << 9) | \ 2620 (1 << 8) | \ 2621 ((writedata) & 0xFF)) 2622 2623 #define SDIO_SET_CMD52_READ_ARG(arg, func, address) \ 2624 SDIO_SET_CMD52_ARG(arg, 0, (func), 0, address, 0x00) 2625 #define SDIO_SET_CMD52_WRITE_ARG(arg, func, address, value) \ 2626 SDIO_SET_CMD52_ARG(arg, 1, (func), 0, address, value) 2627 2628 static int func0_cmd52_write_byte(struct mmc_card *card, 2629 unsigned int address, 2630 unsigned char byte) 2631 { 2632 struct mmc_command io_cmd; 2633 unsigned long arg; 2634 int status = 0; 2635 2636 
memset(&io_cmd, 0, sizeof(io_cmd)); 2637 SDIO_SET_CMD52_WRITE_ARG(arg, 0, address, byte); 2638 io_cmd.opcode = SD_IO_RW_DIRECT; 2639 io_cmd.arg = arg; 2640 io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC; 2641 status = mmc_wait_for_cmd(card->host, &io_cmd, 0); 2642 2643 if (status) 2644 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 2645 ("%s: mmc_wait_for_cmd returned %d\n", 2646 __func__, status)); 2647 2648 return status; 2649 } 2650 2651 static int func0_cmd52_read_byte(struct mmc_card *card, 2652 unsigned int address, 2653 unsigned char *byte) 2654 { 2655 struct mmc_command io_cmd; 2656 unsigned long arg; 2657 int32_t err; 2658 2659 memset(&io_cmd, 0, sizeof(io_cmd)); 2660 SDIO_SET_CMD52_READ_ARG(arg, 0, address); 2661 io_cmd.opcode = SD_IO_RW_DIRECT; 2662 io_cmd.arg = arg; 2663 io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC; 2664 2665 err = mmc_wait_for_cmd(card->host, &io_cmd, 0); 2666 2667 if ((!err) && (byte)) 2668 *byte = io_cmd.resp[0] & 0xFF; 2669 2670 if (err) 2671 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 2672 ("%s: mmc_wait_for_cmd returned %d\n", 2673 __func__, err)); 2674 2675 return err; 2676 } 2677 2678 void hif_dump_cccr(struct hif_sdio_dev *hif_device) 2679 { 2680 int i; 2681 uint8_t cccr_val; 2682 uint32_t err; 2683 2684 if (!hif_device || !hif_device->func || 2685 !hif_device->func->card) { 2686 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 2687 ("hif_dump_cccr incorrect input arguments\n")); 2688 return; 2689 } 2690 2691 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("hif_dump_cccr ")); 2692 for (i = 0; i <= 0x16; i++) { 2693 err = func0_cmd52_read_byte(hif_device->func->card, 2694 i, &cccr_val); 2695 if (err) { 2696 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 2697 ("Reading CCCR 0x%02X failed: %d\n", 2698 (unsigned int)i, (unsigned int)err)); 2699 } else { 2700 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, 2701 ("%X(%X) ", (unsigned int)i, 2702 (unsigned int)cccr_val)); 2703 } 2704 } 2705 2706 AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("\n")); 2707 } 2708 2709 int hif_sdio_device_inserted(struct device *dev, 2710 const struct 
sdio_device_id *id) 2711 { 2712 struct sdio_func *func = dev_to_sdio_func(dev); 2713 2714 return hif_device_inserted(func, id); 2715 } 2716 2717 void hif_sdio_device_removed(struct sdio_func *func) 2718 { 2719 hif_device_removed(func); 2720 } 2721