xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/sdio/native_sdio/src/hif.c (revision a175314c51a4ce5cec2835cc8a8c7dc0c1810915)
1 /*
2  * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <linux/mmc/card.h>
20 #include <linux/mmc/mmc.h>
21 #include <linux/mmc/host.h>
22 #include <linux/mmc/sdio_func.h>
23 #include <linux/mmc/sdio_ids.h>
24 #include <linux/mmc/sdio.h>
25 #include <linux/mmc/sd.h>
26 #include <linux/kthread.h>
27 #include <linux/version.h>
28 #include <linux/module.h>
29 #include <qdf_atomic.h>
30 #include <cds_utils.h>
31 #include <qdf_timer.h>
32 #include <cds_api.h>
33 #include <qdf_time.h>
34 #include "hif_sdio_dev.h"
35 #include "if_sdio.h"
36 #include "regtable_sdio.h"
37 #include "wma_api.h"
38 #include "hif_internal.h"
39 #include <transfer/transfer.h>
40 
41 /* By default, set up a bounce buffer for the data packets.
42  * If the underlying host controller driver does not use DMA,
43  * you may be able to skip this step and save the memory
44  * allocation and transfer time.
45  */
46 #define HIF_USE_DMA_BOUNCE_BUFFER 1
47 #define ATH_MODULE_NAME hif
48 #include "a_debug.h"
49 
50 #if HIF_USE_DMA_BOUNCE_BUFFER
51 /* Macro to check whether a DMA buffer is word-aligned and DMA-able.
52  * Most host controllers assume the buffer is DMA-able and will
53  * bug-check otherwise (e.g. buffers on the stack).
54  * The virt_addr_valid() check fails for stack memory.
55  */
56 #define BUFFER_NEEDS_BOUNCE(buffer)  (((unsigned long)(buffer) & 0x3) || \
57 					!virt_addr_valid((buffer)))
58 #else
59 #define BUFFER_NEEDS_BOUNCE(buffer)   (false)
60 #endif
61 #define MAX_HIF_DEVICES 2
62 #ifdef HIF_MBOX_SLEEP_WAR
63 #define HIF_MIN_SLEEP_INACTIVITY_TIME_MS     50
64 #define HIF_SLEEP_DISABLE_UPDATE_DELAY 1
65 #define HIF_IS_WRITE_REQUEST_MBOX1_TO_3(request) \
66 				((request->request & HIF_SDIO_WRITE) && \
67 				(request->address >= 0x1000 && \
68 				request->address < 0x1FFFF))
69 #endif
70 unsigned int forcesleepmode;
71 module_param(forcesleepmode, uint, 0644);
72 MODULE_PARM_DESC(forcesleepmode,
73 		"Set sleep mode: 0-host capability, 1-force WOW, 2-force DeepSleep, 3-force CutPower");
74 
75 unsigned int forcecard;
76 module_param(forcecard, uint, 0644);
77 MODULE_PARM_DESC(forcecard,
78 		 "Ignore card capabilities information to switch bus mode");
79 
80 unsigned int debugcccr = 1;
81 module_param(debugcccr, uint, 0644);
82 MODULE_PARM_DESC(debugcccr, "Output the CCCR values");
83 
84 #define dev_to_sdio_func(d)		container_of(d, struct sdio_func, dev)
85 #define to_sdio_driver(d)		container_of(d, struct sdio_driver, drv)
86 static struct hif_sdio_dev *add_hif_device(struct sdio_func *func);
87 static void del_hif_device(struct hif_sdio_dev *device);
88 
89 int reset_sdio_on_unload;
90 module_param(reset_sdio_on_unload, int, 0644);
91 
92 uint32_t nohifscattersupport = 1;
93 
94 /* ------ Static Variables ------ */
95 static const struct sdio_device_id ar6k_id_table[] = {
96 #ifdef AR6002_HEADERS_DEF
97 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6002_BASE | 0x0))},
98 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6002_BASE | 0x1))},
99 #endif
100 #ifdef AR6003_HEADERS_DEF
101 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x0))},
102 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x1))},
103 #endif
104 #ifdef AR6004_HEADERS_DEF
105 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x0))},
106 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x1))},
107 #endif
108 #ifdef AR6320_HEADERS_DEF
109 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x0))},
110 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x1))},
111 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x2))},
112 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x3))},
113 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x4))},
114 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x5))},
115 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x6))},
116 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x7))},
117 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x8))},
118 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x9))},
119 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0xA))},
120 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0xB))},
121 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0xC))},
122 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0xD))},
123 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0xE))},
124 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0xF))},
125 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x0))},
126 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x1))},
127 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x2))},
128 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x3))},
129 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x4))},
130 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x5))},
131 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x6))},
132 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x7))},
133 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x8))},
134 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x9))},
135 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0xA))},
136 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0xB))},
137 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0xC))},
138 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0xD))},
139 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0xE))},
140 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0xF))},
141 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x0))},
142 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x1))},
143 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x2))},
144 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x3))},
145 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x4))},
146 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x5))},
147 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x6))},
148 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x7))},
149 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x8))},
150 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x9))},
151 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0xA))},
152 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0xB))},
153 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0xC))},
154 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0xD))},
155 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0xE))},
156 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0xF))},
157 	{SDIO_DEVICE(MANUFACTURER_CODE, (0 | 0x0))},
158 	{SDIO_DEVICE(MANUFACTURER_CODE, (0 | 0x1))},
159 #endif
160 	{ /* null */ },
161 };
162 
163 struct hif_sdio_softc *scn;
164 
165 static struct hif_sdio_dev *hif_devices[MAX_HIF_DEVICES];
166 
167 #if defined(WLAN_DEBUG) || defined(DEBUG)
168 ATH_DEBUG_INSTANTIATE_MODULE_VAR(hif,
169 				 "hif",
170 				 "(Linux MMC) Host Interconnect Framework",
171 				 ATH_DEBUG_MASK_DEFAULTS, 0, NULL);
172 #endif
173 
174 /**
175  * __hif_read_write() - sdio read/write wrapper
176  * @device: pointer to hif device structure
177  * @address: address to read
178  * @buffer: buffer to hold read/write data
179  * @length: length to read/write
180  * @request: read/write/sync/async request
181  * @context: pointer to hold calling context
182  *
183  * Return: 0 on success, error number otherwise.
184  */
185 static QDF_STATUS
186 __hif_read_write(struct hif_sdio_dev *device,
187 		 uint32_t address, char *buffer,
188 		 uint32_t length, uint32_t request, void *context)
189 {
190 	uint8_t opcode;
191 	QDF_STATUS status = QDF_STATUS_SUCCESS;
192 	int ret = A_OK;
193 	uint8_t *tbuffer;
194 	bool bounced = false;
195 
196 	if (device == NULL) {
197 		HIF_ERROR("%s: device null!", __func__);
198 		return QDF_STATUS_E_INVAL;
199 	}
200 
201 	if (device->func == NULL) {
202 		HIF_ERROR("%s: func null!", __func__);
203 		return QDF_STATUS_E_INVAL;
204 	}
205 
206 	HIF_INFO_HI("%s: addr:0X%06X, len:%08d, %s, %s", __func__,
207 		    address, length,
208 		    request & HIF_SDIO_READ ? "Read " : "Write",
209 		    request & HIF_ASYNCHRONOUS ? "Async" : "Sync ");
210 
211 	do {
212 		if (request & HIF_EXTENDED_IO) {
213 			HIF_INFO_HI("%s: Command type: CMD53\n", __func__);
214 		} else {
215 			HIF_ERROR("%s: Invalid command type: 0x%08x\n",
216 				  __func__, request);
217 			status = QDF_STATUS_E_INVAL;
218 			break;
219 		}
220 
221 		if (request & HIF_BLOCK_BASIS) {
222 			/* round to whole block length size */
223 			length =
224 				(length / HIF_BLOCK_SIZE) *
225 				HIF_BLOCK_SIZE;
226 			HIF_INFO_HI("%s: Block mode (BlockLen: %d)\n",
227 				    __func__, length);
228 		} else if (request & HIF_BYTE_BASIS) {
229 			HIF_INFO_HI("%s: Byte mode (BlockLen: %d)\n",
230 				    __func__, length);
231 		} else {
232 			HIF_ERROR("%s: Invalid data mode: 0x%08x\n",
233 				  __func__, request);
234 			status = QDF_STATUS_E_INVAL;
235 			break;
236 		}
237 		if (request & HIF_SDIO_WRITE) {
238 			hif_fixup_write_param(device, request,
239 					      &length, &address);
240 
241 			HIF_INFO_HI("addr:%08X, len:0x%08X, dummy:0x%04X\n",
242 				    address, length,
243 				    (request & HIF_DUMMY_SPACE_MASK) >> 16);
244 		}
245 
246 		if (request & HIF_FIXED_ADDRESS) {
247 			opcode = CMD53_FIXED_ADDRESS;
248 			HIF_INFO_HI("%s: Addr mode: fixed 0x%X\n",
249 				    __func__, address);
250 		} else if (request & HIF_INCREMENTAL_ADDRESS) {
251 			opcode = CMD53_INCR_ADDRESS;
252 			HIF_INFO_HI("%s: Address mode: Incremental 0x%X\n",
253 				    __func__, address);
254 		} else {
255 			HIF_ERROR("%s: Invalid address mode: 0x%08x\n",
256 				  __func__, request);
257 			status = QDF_STATUS_E_INVAL;
258 			break;
259 		}
260 
261 		if (request & HIF_SDIO_WRITE) {
262 #if HIF_USE_DMA_BOUNCE_BUFFER
263 			if (BUFFER_NEEDS_BOUNCE(buffer)) {
264 				AR_DEBUG_ASSERT(device->dma_buffer != NULL);
265 				tbuffer = device->dma_buffer;
266 				/* copy the write data to the dma buffer */
267 				AR_DEBUG_ASSERT(length <= HIF_DMA_BUFFER_SIZE);
268 				if (length > HIF_DMA_BUFFER_SIZE) {
269 					HIF_ERROR("%s: Invalid write len: %d\n",
270 						  __func__, length);
271 					status = QDF_STATUS_E_INVAL;
272 					break;
273 				}
274 				memcpy(tbuffer, buffer, length);
275 				bounced = true;
276 			} else {
277 				tbuffer = buffer;
278 			}
279 #else
280 			tbuffer = buffer;
281 #endif
282 			if (opcode == CMD53_FIXED_ADDRESS  && tbuffer != NULL) {
283 				ret = sdio_writesb(device->func, address,
284 						   tbuffer, length);
285 				HIF_INFO_HI("%s:r=%d addr:0x%X, len:%d, 0x%X\n",
286 					    __func__, ret, address, length,
287 					    *(int *)tbuffer);
288 			} else if (tbuffer) {
289 				ret = sdio_memcpy_toio(device->func, address,
290 						       tbuffer, length);
291 				HIF_INFO_HI("%s:r=%d addr:0x%X, len:%d, 0x%X\n",
292 					    __func__, ret, address, length,
293 					    *(int *)tbuffer);
294 			}
295 		} else if (request & HIF_SDIO_READ) {
296 #if HIF_USE_DMA_BOUNCE_BUFFER
297 			if (BUFFER_NEEDS_BOUNCE(buffer)) {
298 				AR_DEBUG_ASSERT(device->dma_buffer != NULL);
299 				AR_DEBUG_ASSERT(length <= HIF_DMA_BUFFER_SIZE);
300 				if (length > HIF_DMA_BUFFER_SIZE) {
301 					AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
302 					("%s: Invalid read length: %d\n",
303 					__func__, length));
304 					status = QDF_STATUS_E_INVAL;
305 					break;
306 				}
307 				tbuffer = device->dma_buffer;
308 				bounced = true;
309 			} else {
310 				tbuffer = buffer;
311 			}
312 #else
313 			tbuffer = buffer;
314 #endif
315 			if (opcode == CMD53_FIXED_ADDRESS && tbuffer != NULL) {
316 				ret = sdio_readsb(device->func, tbuffer,
317 						  address, length);
318 				HIF_INFO_HI("%s:r=%d addr:0x%X, len:%d, 0x%X\n",
319 					    __func__, ret, address, length,
320 					    *(int *)tbuffer);
321 			} else if (tbuffer) {
322 				ret = sdio_memcpy_fromio(device->func,
323 							 tbuffer, address,
324 							 length);
325 				HIF_INFO_HI("%s:r=%d addr:0x%X, len:%d, 0x%X\n",
326 					    __func__, ret, address, length,
327 					    *(int *)tbuffer);
328 			}
329 #if HIF_USE_DMA_BOUNCE_BUFFER
330 			if (bounced && tbuffer)
331 				memcpy(buffer, tbuffer, length);
332 #endif
333 		} else {
334 			HIF_ERROR("%s: Invalid dir: 0x%08x", __func__, request);
335 			status = QDF_STATUS_E_INVAL;
336 			return status;
337 		}
338 
339 		if (ret) {
340 			HIF_ERROR("%s: SDIO bus operation failed!", __func__);
341 			HIF_ERROR("%s: MMC stack returned : %d", __func__, ret);
342 			HIF_ERROR("%s: addr:0X%06X, len:%08d, %s, %s",
343 				  __func__, address, length,
344 				  request & HIF_SDIO_READ ? "Read " : "Write",
345 				  request & HIF_ASYNCHRONOUS ?
346 				  "Async" : "Sync");
347 			status = QDF_STATUS_E_FAILURE;
348 		}
349 	} while (false);
350 
351 	return status;
352 }
353 
354 /**
355  * add_to_async_list() - add a bus request to the async task list
356  * @device: pointer to hif device
357  * @busrequest: pointer to the bus request to add
358  *
359  * Return: None.
360  */
361 void add_to_async_list(struct hif_sdio_dev *device,
362 		      struct bus_request *busrequest)
363 {
364 	struct bus_request *async;
365 	struct bus_request *active;
366 
367 	qdf_spin_lock_irqsave(&device->asynclock);
368 	active = device->asyncreq;
369 	if (active == NULL) {
370 		device->asyncreq = busrequest;
371 		device->asyncreq->inusenext = NULL;
372 	} else {
373 		for (async = device->asyncreq;
374 		     async != NULL; async = async->inusenext) {
375 			active = async;
376 		}
377 		active->inusenext = busrequest;
378 		busrequest->inusenext = NULL;
379 	}
380 	qdf_spin_unlock_irqrestore(&device->asynclock);
381 }
382 
383 /**
384  * hif_read_write() - queue a read/write request
385  * @device: pointer to hif device structure
386  * @address: address to read
387  * @buffer: buffer to hold read/write data
388  * @length: length to read/write
389  * @request: read/write/sync/async request
390  * @context: pointer to hold calling context
391  *
392  * Return: 0 on success, error number otherwise.
393  */
394 QDF_STATUS
395 hif_read_write(struct hif_sdio_dev *device,
396 		uint32_t address,
397 		char *buffer, uint32_t length,
398 		uint32_t request, void *context)
399 {
400 	QDF_STATUS status = QDF_STATUS_SUCCESS;
401 	struct bus_request *busrequest;
402 
403 	AR_DEBUG_ASSERT(device != NULL);
404 	AR_DEBUG_ASSERT(device->func != NULL);
405 	AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
406 			("%s: device 0x%pK addr 0x%X buffer 0x%pK len %d req 0x%X context 0x%pK",
407 			 __func__, device, address, buffer,
408 			 length, request, context));
409 
410 	/* sdio r/w is not needed during cut-power suspend, so just return */
411 	if ((device->is_suspend == true)
412 	    && (device->power_config == HIF_DEVICE_POWER_CUT)) {
413 		AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("skip io when suspending\n"));
414 		return QDF_STATUS_SUCCESS;
415 	}
416 	do {
417 		if ((request & HIF_ASYNCHRONOUS) ||
418 			(request & HIF_SYNCHRONOUS)) {
419 			/* serialize all requests through the async thread */
420 			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
421 					("%s: Execution mode: %s\n", __func__,
422 					 (request & HIF_ASYNCHRONOUS) ? "Async"
423 					 : "Synch"));
424 			busrequest = hif_allocate_bus_request(device);
425 			if (busrequest == NULL) {
426 				AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
427 					("no async bus requests available (%s, addr:0x%X, len:%d)\n",
428 					 request & HIF_SDIO_READ ? "READ" :
429 					 "WRITE", address, length));
430 				return QDF_STATUS_E_FAILURE;
431 			}
432 			busrequest->address = address;
433 			busrequest->buffer = buffer;
434 			busrequest->length = length;
435 			busrequest->request = request;
436 			busrequest->context = context;
437 
438 			add_to_async_list(device, busrequest);
439 
440 			if (request & HIF_SYNCHRONOUS) {
441 				AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
442 					("%s: queued sync req: 0x%lX\n",
443 					 __func__, (unsigned long)busrequest));
444 
445 				/* wait for completion */
446 				up(&device->sem_async);
447 				if (down_interruptible(&busrequest->sem_req) !=
448 				    0) {
449 					/* interrupted, exit */
450 					return QDF_STATUS_E_FAILURE;
451 				} else {
452 					QDF_STATUS status = busrequest->status;
453 
454 					AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
455 				    ("%s: sync return freeing 0x%lX: 0x%X\n",
456 						 __func__,
457 						 (unsigned long)
458 						 busrequest,
459 						 busrequest->status));
460 					AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
461 						("%s: freeing req: 0x%X\n",
462 						 __func__,
463 						 (unsigned int)
464 						 request));
465 					hif_free_bus_request(device,
466 						busrequest);
467 					return status;
468 				}
469 			} else {
470 				AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
471 					("%s: queued async req: 0x%lX\n",
472 						__func__,
473 						 (unsigned long)busrequest));
474 				up(&device->sem_async);
475 				return QDF_STATUS_E_PENDING;
476 			}
477 		} else {
478 			AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
479 				("%s: Invalid execution mode: 0x%08x\n",
480 					__func__,
481 					 (unsigned int)request));
482 			status = QDF_STATUS_E_INVAL;
483 			break;
484 		}
485 	} while (0);
486 
487 	return status;
488 }
489 
490 /**
491  * async_task() - thread function to serialize all bus requests
492  * @param: pointer to hif device
493  *
494  * thread function to serialize all requests, both sync and async
495  * Return: 0 on success, error number otherwise.
496  */
497 static int async_task(void *param)
498 {
499 	struct hif_sdio_dev *device;
500 	struct bus_request *request;
501 	QDF_STATUS status;
502 
503 	device = (struct hif_sdio_dev *) param;
504 	set_current_state(TASK_INTERRUPTIBLE);
505 	while (!device->async_shutdown) {
506 		/* wait for work */
507 		if (down_interruptible(&device->sem_async) != 0) {
508 			/* interrupted, exit */
509 			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
510 					("%s: async task interrupted\n",
511 					 __func__));
512 			break;
513 		}
514 		if (device->async_shutdown) {
515 			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
516 					("%s: async task stopping\n",
517 					 __func__));
518 			break;
519 		}
520 		/* we want to hold the host over multiple cmds
521 		 * if possible, but holding the host blocks
522 		 * card interrupts
523 		 */
524 		sdio_claim_host(device->func);
525 		qdf_spin_lock_irqsave(&device->asynclock);
526 		/* pull the request to work on */
527 		while (device->asyncreq != NULL) {
528 			request = device->asyncreq;
529 			if (request->inusenext != NULL)
530 				device->asyncreq = request->inusenext;
531 			else
532 				device->asyncreq = NULL;
533 			qdf_spin_unlock_irqrestore(&device->asynclock);
534 			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
535 				("%s: async_task processing req: 0x%lX\n",
536 				 __func__, (unsigned long)request));
537 
538 			if (request->scatter_req != NULL) {
539 				A_ASSERT(device->scatter_enabled);
540 				/* pass the request to scatter routine which
541 				 * executes it synchronously, note, no need
542 				 * to free the request since scatter requests
543 				 * are maintained on a separate list
544 				 */
545 				status = do_hif_read_write_scatter(device,
546 							request);
547 			} else {
548 				/* call hif_read_write in sync mode */
549 				status =
550 					__hif_read_write(device,
551 							 request->address,
552 							 request->buffer,
553 							 request->length,
554 							 request->request &
555 							 ~HIF_SYNCHRONOUS,
557 							 NULL);
558 				if (request->request & HIF_ASYNCHRONOUS) {
559 					void *context = request->context;
560 
561 					AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
562 					("%s: freeing req: 0x%lX\n",
563 						 __func__, (unsigned long)
564 						 request));
565 					hif_free_bus_request(device, request);
566 					AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
567 				      ("%s: async_task completion req 0x%lX\n",
568 						 __func__, (unsigned long)
569 						 request));
570 					device->htc_callbacks.
571 					rw_compl_handler(context, status);
572 				} else {
573 					AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
574 				      ("%s: async_task upping req: 0x%lX\n",
575 						 __func__, (unsigned long)
576 						 request));
577 					request->status = status;
578 					up(&request->sem_req);
579 				}
580 			}
581 			qdf_spin_lock_irqsave(&device->asynclock);
582 		}
583 		qdf_spin_unlock_irqrestore(&device->asynclock);
584 		sdio_release_host(device->func);
585 	}
586 
587 	complete_and_exit(&device->async_completion, 0);
588 
589 	return 0;
590 }
591 
592 /*
593  * Set up the IRQ mode for deep sleep and WoW.
594  * Switch back to 1-bit mode when we suspend for
595  * WoW in order to detect the SDIO IRQ without a clock.
596  * Re-enable async 4-bit irq mode for some host controllers
597  * after resume.
598  */
599 static int sdio_enable4bits(struct hif_sdio_dev *device, int enable)
600 {
601 	int ret = 0;
602 	struct sdio_func *func = device->func;
603 	struct mmc_card *card = func->card;
604 	struct mmc_host *host = card->host;
605 
606 	if (!(host->caps & (MMC_CAP_4_BIT_DATA)))
607 		return 0;
608 
609 	if (card->cccr.low_speed && !card->cccr.wide_bus)
610 		return 0;
611 
612 	sdio_claim_host(func);
613 	do {
614 		int setAsyncIRQ = 0;
615 		__u16 manufacturer_id =
616 			device->id->device & MANUFACTURER_ID_AR6K_BASE_MASK;
617 
618 		/* Re-enable 4-bit ASYNC interrupt on AR6003x
619 		 * after system resume for some host controllers
620 		 */
621 		if (manufacturer_id == MANUFACTURER_ID_AR6003_BASE) {
622 			setAsyncIRQ = 1;
623 			ret =
624 				func0_cmd52_write_byte(func->card,
625 					    CCCR_SDIO_IRQ_MODE_REG_AR6003,
626 					    enable ?
627 					    SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_AR6003
628 					    : 0);
629 		} else if (manufacturer_id == MANUFACTURER_ID_AR6320_BASE ||
630 			     manufacturer_id == MANUFACTURER_ID_QCA9377_BASE ||
631 			     manufacturer_id == MANUFACTURER_ID_QCA9379_BASE) {
632 			unsigned char data = 0;
633 
634 			setAsyncIRQ = 1;
635 			ret =
636 				func0_cmd52_read_byte(func->card,
637 					      CCCR_SDIO_IRQ_MODE_REG_AR6320,
638 						   &data);
639 			if (ret) {
640 				AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
641 					("%s: failed to read interrupt extension register %d\n",
642 						 __func__, ret));
643 				sdio_release_host(func);
644 				return ret;
645 			}
646 			if (enable)
647 				data |= SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_AR6320;
648 			else
649 				data &= ~SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_AR6320;
650 			ret =
651 				func0_cmd52_write_byte(func->card,
652 					       CCCR_SDIO_IRQ_MODE_REG_AR6320,
653 					       data);
654 		}
655 		if (setAsyncIRQ) {
656 			if (ret) {
657 				AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
658 					("%s: failed to setup 4-bit ASYNC IRQ mode into %d err %d\n",
659 					 __func__, enable, ret));
660 			} else {
661 				AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
662 					("%s: Setup 4-bit ASYNC IRQ mode into %d successfully\n",
663 					 __func__, enable));
664 			}
665 		}
666 	} while (0);
667 	sdio_release_host(func);
668 
669 	return ret;
670 }
671 
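/**
 * hif_disable_func() - stop the async task and disable the SDIO function
 * @device: pointer to hif device structure
 * @func: SDIO function to disable
 * @reset: whether the target should be reset while disabling
 *
 * Return: QDF_STATUS_SUCCESS on success, error code otherwise.
 */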
672 static QDF_STATUS hif_disable_func(struct hif_sdio_dev *device,
673 				   struct sdio_func *func,
674 				   bool reset)
675 {
676 	QDF_STATUS status = QDF_STATUS_SUCCESS;
677 
678 	HIF_ENTER();
679 	device = get_hif_device(func);
680 	if (!IS_ERR(device->async_task)) {
681 		init_completion(&device->async_completion);
682 		device->async_shutdown = 1;
683 		up(&device->sem_async);
684 		wait_for_completion(&device->async_completion);
685 		device->async_task = NULL;
686 		sema_init(&device->sem_async, 0);
687 	}
688 
689 	status = hif_sdio_func_disable(device, func, reset);
690 	if (status == QDF_STATUS_SUCCESS)
691 		device->is_disabled = true;
692 
693 	cleanup_hif_scatter_resources(device);
694 
695 	HIF_EXIT();
696 
697 	return status;
698 }
699 
700 /**
701  * hif_sdio_probe() - configure sdio device
702  * @ol_sc: HIF device context
703  * @func: SDIO function context
704  * @device: pointer to hif handle
705  *
706  * Return: 0 for success and non-zero for failure
707  */
708 static A_STATUS hif_sdio_probe(struct hif_softc *ol_sc,
709 			       struct sdio_func *func,
710 			       struct hif_sdio_dev *device)
711 {
712 	int ret = 0;
713 	const struct sdio_device_id *id;
714 	uint32_t target_type;
715 
716 	HIF_ENTER();
717 	scn = (struct hif_sdio_softc *)ol_sc;
718 
719 	scn->hif_handle = device;
720 	spin_lock_init(&scn->target_lock);
721 	/*
722 	 * Attach Target register table. This is needed early on
723 	 * even before BMI since the HIF initialization
724 	 * directly accesses Target registers.
725 	 *
726 	 * TBDXXX: targetdef should not be global -- should be stored
727 	 * in per-device struct so that we can support multiple
728 	 * different Target types with a single Host driver.
729 	 * The whole notion of an "hif type" -- (not as in the hif
730 	 * module, but generic "Host Interface Type") is bizarre.
731 	 * At first, one would expect it to be things like SDIO, USB, PCI.
732 	 * But instead, it's an actual platform type. Inexplicably, the
733 	 * values used for HIF platform types are *different* from the
734 	 * values used for Target Types.
735 	 */
736 
737 #if defined(CONFIG_AR9888_SUPPORT)
738 	hif_register_tbl_attach(ol_sc, HIF_TYPE_AR9888);
739 	target_register_tbl_attach(ol_sc, TARGET_TYPE_AR9888);
740 	target_type = TARGET_TYPE_AR9888;
741 #elif defined(CONFIG_AR6320_SUPPORT)
742 	id = device->id;
743 	if (((id->device & MANUFACTURER_ID_AR6K_BASE_MASK) ==
744 				MANUFACTURER_ID_QCA9377_BASE) ||
745 			((id->device & MANUFACTURER_ID_AR6K_BASE_MASK) ==
746 			 MANUFACTURER_ID_QCA9379_BASE)) {
747 		hif_register_tbl_attach(ol_sc, HIF_TYPE_AR6320V2);
748 		target_register_tbl_attach(ol_sc, TARGET_TYPE_AR6320V2);
749 	} else if ((id->device & MANUFACTURER_ID_AR6K_BASE_MASK) ==
750 			MANUFACTURER_ID_AR6320_BASE) {
751 		int ar6kid = id->device & MANUFACTURER_ID_AR6K_REV_MASK;
752 
753 		if (ar6kid >= 1) {
754 			/* v2 or higher silicon */
755 			hif_register_tbl_attach(ol_sc, HIF_TYPE_AR6320V2);
756 			target_register_tbl_attach(ol_sc, TARGET_TYPE_AR6320V2);
757 		} else {
758 			/* legacy v1 silicon */
759 			hif_register_tbl_attach(ol_sc, HIF_TYPE_AR6320);
760 			target_register_tbl_attach(ol_sc, TARGET_TYPE_AR6320);
761 		}
762 	}
763 	target_type = TARGET_TYPE_AR6320;
764 
765 #endif
766 	scn->targetdef = ol_sc->targetdef;
767 	scn->hostdef = ol_sc->hostdef;
768 	scn->dev = &func->dev;
769 	ol_sc->bus_type = QDF_BUS_TYPE_SDIO;
770 	ol_sc->target_info.target_type = target_type;
771 
772 	scn->ramdump_base =
773 		pld_hif_sdio_get_virt_ramdump_mem(&func->dev,
774 						  &scn->ramdump_size);
775 	if (!scn->ramdump_base || !scn->ramdump_size) {
776 		HIF_ERROR("%s: Failed ramdump res alloc - base:%s, len:%lu",
777 			  __func__,
778 			  scn->ramdump_base ? "ok" : "null",
779 			  scn->ramdump_size);
780 	} else {
781 		HIF_INFO("%s: ramdump base %pK size %lu", __func__,
782 			 scn->ramdump_base, scn->ramdump_size);
783 	}
784 
785 	if (athdiag_procfs_init(scn) != 0) {
786 		ret = QDF_STATUS_E_FAILURE;
787 		goto err_attach1;
788 	}
789 
790 	return 0;
791 
792 err_attach1:
793 	if (scn->ramdump_base)
794 		pld_hif_sdio_release_ramdump_mem(scn->ramdump_base);
795 	scn = NULL;
796 	return ret;
797 }
798 
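/**
 * hif_enable_func() - enable the SDIO function and restart the async task
 * @ol_sc: HIF device context
 * @device: pointer to hif device structure
 * @func: SDIO function to enable
 * @resume: true when called from the resume path, which skips re-probing
 *
 * Return: QDF_STATUS_SUCCESS on success, error code otherwise.
 */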
799 static QDF_STATUS hif_enable_func(struct hif_softc *ol_sc,
800 				  struct hif_sdio_dev *device,
801 				  struct sdio_func *func,
802 				  bool resume)
803 {
804 	int ret = QDF_STATUS_SUCCESS;
805 
806 	HIF_ENTER();
807 
808 	if (!device) {
809 		HIF_ERROR("%s: HIF device is NULL", __func__);
810 		return QDF_STATUS_E_INVAL;
811 	}
812 
813 	if (hif_sdio_func_enable(device, func))
814 		return QDF_STATUS_E_FAILURE;
815 
816 	/* create async I/O thread */
817 	if (!device->async_task && device->is_disabled) {
818 		device->async_shutdown = 0;
819 		device->async_task = kthread_create(async_task,
820 						    (void *)device,
821 						    "AR6K Async");
822 		if (IS_ERR(device->async_task)) {
823 			HIF_ERROR("%s: Error creating async task",
824 				  __func__);
825 			return QDF_STATUS_E_FAILURE;
826 		}
827 		device->is_disabled = false;
828 		wake_up_process(device->async_task);
829 	}
830 
831 	if (resume == false)
832 		ret = hif_sdio_probe(ol_sc, func, device);
833 
834 	HIF_EXIT();
835 
836 	return ret;
837 }
838 
839 /**
840  * power_state_change_notify() - SDIO bus power notification handler
 * @ol_sc: HIF device context
 * @device: pointer to hif device structure
841  * @config: hif device power change type
842  *
843  * Return: 0 on success, error number otherwise.
844  */
845 static QDF_STATUS
846 power_state_change_notify(struct hif_softc *ol_sc,
847 			  struct hif_sdio_dev *device,
848 			  enum HIF_DEVICE_POWER_CHANGE_TYPE config)
849 {
850 	QDF_STATUS status = QDF_STATUS_SUCCESS;
851 	struct sdio_func *func = device->func;
852 
853 	AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
854 			("%s: config type %d\n",
855 			__func__, config));
856 	switch (config) {
857 	case HIF_DEVICE_POWER_DOWN:
858 		/* Disable 4bits to allow SDIO bus to detect
859 		 * DAT1 as interrupt source
860 		 */
861 		sdio_enable4bits(device, 0);
862 		break;
863 	case HIF_DEVICE_POWER_CUT:
864 		status = hif_disable_func(device, func, 1);
865 		if (!device->is_suspend) {
866 			device->power_config = config;
867 			mmc_detect_change(device->host, HZ / 3);
868 		}
869 		break;
870 	case HIF_DEVICE_POWER_UP:
871 		if (device->power_config == HIF_DEVICE_POWER_CUT) {
872 			if (device->is_suspend) {
873 				status = reinit_sdio(device);
874 				/* set power_config before EnableFunc to
875 				 * passthrough sdio r/w action when resuming
876 				 * from cut power
877 				 */
878 				device->power_config = config;
879 				if (status == QDF_STATUS_SUCCESS)
880 					status = hif_enable_func(ol_sc, device,
881 								 func, true);
882 			} else {
883 				/* device->func is a bad pointer at this time */
884 				mmc_detect_change(device->host, 0);
885 				return QDF_STATUS_E_PENDING;
886 			}
887 		} else if (device->power_config == HIF_DEVICE_POWER_DOWN) {
888 			int ret = sdio_enable4bits(device, 1);
889 
890 			status = (ret == 0) ? QDF_STATUS_SUCCESS :
891 						QDF_STATUS_E_FAILURE;
892 		}
893 		break;
894 	}
895 	device->power_config = config;
896 
897 	AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
898 			("%s:\n", __func__));
899 
900 	return status;
901 }
902 
903 
904 /**
905  * hif_configure_device() - configure sdio device
 * @ol_sc: HIF device context
906  * @device: pointer to hif device structure
907  * @opcode: configuration type
908  * @config: configuration value to set
909  * @config_len: configuration length
910  *
911  * Return: 0 on success, error number otherwise.
912  */
913 QDF_STATUS
914 hif_configure_device(struct hif_softc *ol_sc, struct hif_sdio_dev *device,
915 		     enum hif_device_config_opcode opcode,
916 		     void *config, uint32_t config_len)
917 {
918 	QDF_STATUS status = QDF_STATUS_SUCCESS;
919 
920 	switch (opcode) {
921 	case HIF_DEVICE_GET_BLOCK_SIZE:
922 		hif_dev_get_block_size(config);
923 		break;
924 
925 	case HIF_DEVICE_GET_FIFO_ADDR:
926 		hif_dev_get_fifo_address(device, config, config_len);
927 		break;
928 
929 	case HIF_DEVICE_GET_PENDING_EVENTS_FUNC:
930 		HIF_WARN("%s: opcode %d",  __func__, opcode);
931 		status = QDF_STATUS_E_FAILURE;
932 		break;
933 	case HIF_DEVICE_GET_IRQ_PROC_MODE:
934 		*((enum hif_device_irq_mode *) config) =
935 			HIF_DEVICE_IRQ_SYNC_ONLY;
936 		break;
937 	case HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC:
938 		HIF_WARN("%s: opcode %d", __func__, opcode);
939 		status = QDF_STATUS_E_FAILURE;
940 		break;
941 	case HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT:
942 		if (!device->scatter_enabled)
943 			return QDF_STATUS_E_NOSUPPORT;
944 		status =
945 			setup_hif_scatter_support(device,
946 				  (struct HIF_DEVICE_SCATTER_SUPPORT_INFO *)
947 				   config);
948 		if (QDF_IS_STATUS_ERROR(status))
949 			device->scatter_enabled = false;
950 		break;
951 	case HIF_DEVICE_GET_OS_DEVICE:
952 		/* pass back a pointer to the SDIO function's "dev" struct */
953 		((struct HIF_DEVICE_OS_DEVICE_INFO *) config)->os_dev =
954 			&device->func->dev;
955 		break;
956 	case HIF_DEVICE_POWER_STATE_CHANGE:
957 		status =
958 		power_state_change_notify(ol_sc, device,
959 					  *(enum HIF_DEVICE_POWER_CHANGE_TYPE *)
960 					   config);
961 		break;
962 	case HIF_DEVICE_GET_IRQ_YIELD_PARAMS:
963 		HIF_WARN("%s: opcode %d", __func__, opcode);
964 		status = QDF_STATUS_E_FAILURE;
965 		break;
966 	case HIF_DEVICE_SET_HTC_CONTEXT:
967 		device->htc_context = config;
968 		break;
969 	case HIF_DEVICE_GET_HTC_CONTEXT:
970 		if (config == NULL) {
971 			HIF_ERROR("%s: htc context is NULL", __func__);
972 			return QDF_STATUS_E_FAILURE;
973 		}
974 		*(void **)config = device->htc_context;
975 		break;
976 	case HIF_BMI_DONE:
977 		HIF_ERROR("%s: BMI_DONE", __func__);
978 		break;
979 	default:
980 		HIF_ERROR("%s: Unsupported  opcode: %d", __func__, opcode);
981 		status = QDF_STATUS_E_FAILURE;
982 	}
983 
984 	return status;
985 }
986 
987 /**
988  * hif_sdio_shutdown() - hif-sdio shutdown routine
989  * @hif_ctx: pointer to hif_softc structure
990  *
991  * Return: None.
992  */
993 void hif_sdio_shutdown(struct hif_softc *hif_ctx)
994 {
995 	struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_ctx);
996 	struct hif_sdio_dev *hif_device = scn->hif_handle;
997 
998 	AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
999 			("%s: Enter\n", __func__));
1000 	if (hif_device != NULL) {
1001 		AR_DEBUG_ASSERT(hif_device->power_config == HIF_DEVICE_POWER_CUT
1002 				|| hif_device->func != NULL);
1003 	} else {
1004 		int i;
1005 		/* since we are unloading the driver anyway,
1006 		 * reset all cards in case the SDIO card is
1007 		 * externally powered and we are unloading the SDIO
1008 		 * stack. This avoids the problem when the SDIO stack
1009 		 * is reloaded and attempts are made to re-enumerate
1010 		 * a card that is already enumerated
1011 		 */
1012 		for (i = 0; i < MAX_HIF_DEVICES; ++i) {
1013 			if (hif_devices[i] && hif_devices[i]->func == NULL) {
1014 				AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
1015 				("%s: Remove pending hif_device %pK\n",
1016 					 __func__, hif_devices[i]));
1017 				del_hif_device(hif_devices[i]);
1018 				hif_devices[i] = NULL;
1019 			}
1020 		}
1021 	}
1022 	AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
1023 			("%s: Exit\n", __func__));
1024 }
1025 
1026 /**
1027  * hif_device_inserted() - hif-sdio driver probe handler
 * @ol_sc: HIF device context
1028  * @func: pointer to sdio_func
1029  * @id: pointer to sdio_device_id
1030  *
1031  * Return: 0 on success, error number otherwise.
1032  */
1033 static int hif_device_inserted(struct hif_softc *ol_sc,
1034 			       struct sdio_func *func,
1035 			       const struct sdio_device_id *id)
1036 {
1037 	int i, ret = 0, count;
1038 	struct hif_sdio_dev *device = NULL;
1039 
1040 	HIF_INFO("%s: F%X, VID: 0x%X, DevID: 0x%X, block size: 0x%X/0x%X\n",
1041 		 __func__, func->num, func->vendor, id->device,
1042 		 func->max_blksize, func->cur_blksize);
1043 
1044 	/* dma_mask should be populated here. Use the parent device's setting */
1045 	func->dev.dma_mask = mmc_dev(func->card->host)->dma_mask;
1046 
1047 	for (i = 0; i < MAX_HIF_DEVICES; ++i) {
1048 		struct hif_sdio_dev *hifdevice = hif_devices[i];
1049 
1050 		if (hifdevice &&
1051 		    hifdevice->power_config == HIF_DEVICE_POWER_CUT &&
1052 		    hifdevice->host == func->card->host) {
1053 			device = get_hif_device(func);
1054 			hifdevice->func = func;
1055 			hifdevice->power_config = HIF_DEVICE_POWER_UP;
1056 			hif_sdio_set_drvdata(func, hifdevice);
1057 
1058 			if (device->is_suspend) {
1059 				HIF_INFO("%s: Resume from suspend", __func__);
1060 				ret = reinit_sdio(device);
1061 			}
1062 			break;
1063 		}
1064 	}
1065 
1066 	/* If device not found, then it is a new insertion, alloc and add it */
1067 	if (device == NULL) {
1068 		if (add_hif_device(func) == NULL)
1069 			return QDF_STATUS_E_FAILURE;
1070 		device = get_hif_device(func);
1071 
1072 		for (i = 0; i < MAX_HIF_DEVICES; ++i) {
1073 			if (hif_devices[i] == NULL) {
1074 				hif_devices[i] = device;
1075 				break;
1076 			}
1077 		}
1078 		if (i == MAX_HIF_DEVICES) {
1079 			HIF_ERROR("%s: No more slots", __func__);
1080 			goto del_hif_dev;
1081 		}
1082 
1083 		device->id = id;
1084 		device->host = func->card->host;
1085 		device->is_disabled = true;
1086 		/* TODO: the MMC SDIO 3.0 settings should also be applied in
1087 		 * reinit_sdio() once power management works.
1088 		 */
1089 		sdio_claim_host(func);
1090 
1091 		hif_sdio_quirk_force_drive_strength(func);
1092 
1093 		hif_sdio_quirk_write_cccr(func);
1094 
1095 		ret = hif_sdio_set_bus_speed(func);
1096 
1097 		ret = hif_sdio_set_bus_width(func);
1098 		if (debugcccr)
1099 			hif_dump_cccr(device);
1100 
1101 		sdio_release_host(func);
1102 	}
1103 
1104 	qdf_spinlock_create(&device->lock);
1105 
1106 	qdf_spinlock_create(&device->asynclock);
1107 
1108 	DL_LIST_INIT(&device->scatter_req_head);
1109 
1110 	if (!nohifscattersupport) {
1111 		/* try to allow scatter operation on all instances,
1112 		 * unless globally overridden
1113 		 */
1114 		device->scatter_enabled = true;
1115 	} else
1116 		device->scatter_enabled = false;
1117 
1118 	/* Initialize the bus requests to be used later */
1119 	qdf_mem_zero(device->bus_request, sizeof(device->bus_request));
1120 	for (count = 0; count < BUS_REQUEST_MAX_NUM; count++) {
1121 		sema_init(&device->bus_request[count].sem_req, 0);
1122 		hif_free_bus_request(device, &device->bus_request[count]);
1123 	}
1124 	sema_init(&device->sem_async, 0);
1125 
1126 	ret = hif_enable_func(ol_sc, device, func, false);
1127 	if ((ret == QDF_STATUS_SUCCESS || ret == QDF_STATUS_E_PENDING))
1128 		return 0;
1129 	ret = QDF_STATUS_E_FAILURE;
1130 del_hif_dev:
1131 	del_hif_device(device);
1132 	for (i = 0; i < MAX_HIF_DEVICES; ++i) {
1133 		if (hif_devices[i] == device) {
1134 			hif_devices[i] = NULL;
1135 			break;
1136 		}
1137 	}
1138 	if (i == MAX_HIF_DEVICES) {
1139 		AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
1140 			("%s: No hif_devices[] slot for %pK",
1141 			__func__, device));
1142 	}
1143 	return ret;
1144 }
1145 
1146 /**
1147  * hif_ack_interrupt() - Acknowledge hif device irq
1148  * @device: pointer to struct hif_sdio_dev
1149  *
1150  * This should translate to an acknowledgment to the bus driver indicating that
1151  * the previous interrupt request has been serviced and all the relevant
1152  * sources have been cleared. HTC is ready to process more interrupts.
1153  * This should prevent the bus driver from raising an interrupt unless the
1154  * previous one has been serviced and acknowledged using the previous API.
1155  *
1156  * Return: None.
1157  */
1158 void hif_ack_interrupt(struct hif_sdio_dev *device)
1159 {
1160 	AR_DEBUG_ASSERT(device != NULL);
1161 
1162 	/* Acknowledge our function IRQ */
1163 }
1164 
1165 /**
1166  * hif_sdio_configure_pipes() - Configure pipes for the lower layer bus
1167  * @dev: HIF SDIO device object
1168  * @func: SDIO bus function object
1169  *
1170  * Return: None
1171  */
1172 void hif_sdio_configure_pipes(struct hif_sdio_dev *dev, struct sdio_func *func)
1173 {
1174 	/* ADMA-TODO */
1175 }
1176 
1177 /**
1178  * hif_allocate_bus_request() - Allocate hif bus request
1179  * @device: pointer to struct hif_sdio_dev
1180  *
1181  *
1183  */
1184 struct bus_request *hif_allocate_bus_request(struct hif_sdio_dev *device)
1185 {
1186 	struct bus_request *busrequest;
1187 
1188 	qdf_spin_lock_irqsave(&device->lock);
1189 	busrequest = device->bus_request_free_queue;
1190 	/* Remove first in list */
1191 	if (busrequest != NULL)
1192 		device->bus_request_free_queue = busrequest->next;
1193 
1194 	/* Release lock */
1195 	qdf_spin_unlock_irqrestore(&device->lock);
1196 	AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
1197 			("%s: hif_allocate_bus_request: 0x%pK\n",
1198 			__func__, busrequest));
1199 
1200 	return busrequest;
1201 }
1202 
1203 /**
1204  * hif_free_bus_request() - Free hif bus request
1205  * @device: pointer to struct hif_sdio_dev
1206  * @busrequest: pointer to the bus request to free
1207  *
1208  * Return: None.
1209  */
1210 void hif_free_bus_request(struct hif_sdio_dev *device,
1211 			  struct bus_request *busrequest)
1212 {
1213 	AR_DEBUG_ASSERT(busrequest != NULL);
1214 	/* Acquire lock */
1215 	qdf_spin_lock_irqsave(&device->lock);
1216 
1217 	/* Insert first in list */
1218 	busrequest->next = device->bus_request_free_queue;
1219 	busrequest->inusenext = NULL;
1220 	device->bus_request_free_queue = busrequest;
1221 
1222 	/* Release lock */
1223 	qdf_spin_unlock_irqrestore(&device->lock);
1224 }
1225 
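/**
 * hif_device_suspend() - SDIO suspend handler
 * @ol_sc: HIF device context
 * @dev: pointer to the struct device of the SDIO function
 *
 * Selects cut power, WoW or deep sleep based on the host PM capabilities
 * (or the forcesleepmode module parameter) and masks the device interrupt.
 *
 * Return: 0 on success, error number otherwise.
 */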
1226 int hif_device_suspend(struct hif_softc *ol_sc, struct device *dev)
1227 {
1228 	struct sdio_func *func = dev_to_sdio_func(dev);
1229 	struct hif_sdio_dev *device = get_hif_device(func);
1230 	mmc_pm_flag_t pm_flag = 0;
1231 	enum HIF_DEVICE_POWER_CHANGE_TYPE config;
1232 	struct mmc_host *host = func->card->host;
1233 
1234 	host = device->func->card->host;
1235 
1236 	device->is_suspend = true;
1237 
1238 	switch (forcesleepmode) {
1239 	case 0: /* depend on sdio host pm capability */
1240 		pm_flag = sdio_get_host_pm_caps(func);
1241 		break;
1242 	case 1: /* force WOW */
1243 		pm_flag |= MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ;
1244 		break;
1245 	case 2: /* force DeepSleep */
1246 		pm_flag &= ~MMC_PM_WAKE_SDIO_IRQ;
1247 		pm_flag |= MMC_PM_KEEP_POWER;
1248 		break;
1249 	case 3: /* force CutPower */
1250 		pm_flag &= ~(MMC_PM_WAKE_SDIO_IRQ | MMC_PM_KEEP_POWER);
1251 		break;
1253 	}
1254 
1255 	if (!(pm_flag & MMC_PM_KEEP_POWER)) {
1256 		/* setting power_config before hif_configure_device to
1257 		 * skip sdio r/w when suspending with cut power
1258 		 */
1259 		HIF_INFO("%s: Power cut", __func__);
1260 		config = HIF_DEVICE_POWER_CUT;
1261 		device->power_config = config;
1262 
1263 		hif_configure_device(ol_sc, device,
1264 				     HIF_DEVICE_POWER_STATE_CHANGE,
1265 				     &config,
1266 				     sizeof(config));
1267 		hif_mask_interrupt(device);
1268 		device->device_state = HIF_DEVICE_STATE_CUTPOWER;
1269 		return 0;
1270 	}
1271 
1272 	if (sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER)) {
1273 		HIF_ERROR("%s: set pm_flags failed", __func__);
1274 		return -EINVAL;
1275 	}
1276 
1277 	if (pm_flag & MMC_PM_WAKE_SDIO_IRQ) {
1278 		HIF_INFO("%s: WOW mode ", __func__);
1279 		config = HIF_DEVICE_POWER_DOWN;
1280 		hif_configure_device(ol_sc, device,
1281 				     HIF_DEVICE_POWER_STATE_CHANGE,
1282 				     &config,
1283 				     sizeof(config));
1284 
1285 		if (sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ)) {
1286 			HIF_ERROR("%s: set pm_flags failed", __func__);
1287 			return -EINVAL;
1288 		}
1289 		hif_mask_interrupt(device);
1290 		device->device_state = HIF_DEVICE_STATE_WOW;
1291 		return 0;
1292 	} else {
1293 		HIF_INFO("%s: deep sleep enter", __func__);
1294 		msleep(100);
1295 		hif_mask_interrupt(device);
1296 		device->device_state = HIF_DEVICE_STATE_DEEPSLEEP;
1297 		return 0;
1298 	}
1299 
1300 	return 0;
1301 }
1302 
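/**
 * hif_device_resume() - SDIO resume handler
 * @ol_sc: HIF device context
 * @dev: pointer to the struct device of the SDIO function
 *
 * Restores power and interrupt state according to the state saved at suspend.
 *
 * Return: 0 on success, error number otherwise.
 */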
1303 int hif_device_resume(struct hif_softc *ol_sc, struct device *dev)
1304 {
1305 	struct sdio_func *func = dev_to_sdio_func(dev);
1306 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1307 	enum HIF_DEVICE_POWER_CHANGE_TYPE config;
1308 	struct hif_sdio_dev *device;
1309 
1310 	device = get_hif_device(func);
1311 	if (!device) {
1312 		HIF_ERROR("%s: hif object is null", __func__);
1313 		return -EINVAL;
1314 	}
1315 
1316 	if (device->device_state == HIF_DEVICE_STATE_CUTPOWER) {
1317 		config = HIF_DEVICE_POWER_UP;
1318 		hif_configure_device(ol_sc, device,
1319 				     HIF_DEVICE_POWER_STATE_CHANGE,
1320 				     &config,
1321 				     sizeof(config));
1322 		hif_enable_func(ol_sc, device, func, true);
1323 	} else if (device->device_state == HIF_DEVICE_STATE_DEEPSLEEP) {
1324 		hif_un_mask_interrupt(device);
1325 	} else if (device->device_state == HIF_DEVICE_STATE_WOW) {
1326 		/*TODO:WOW support */
1327 		hif_un_mask_interrupt(device);
1328 	}
1329 
1330 	device->is_suspend = false;
1331 	device->device_state = HIF_DEVICE_STATE_ON;
1332 
1333 	return QDF_IS_STATUS_SUCCESS(status) ? 0 : status;
1334 }
1335 
1336 /**
1337  * hif_sdio_remove() - remove sdio device
1338  * @context: sdio device context
1339  * @hif_handle: pointer to the hif device
1340  *
1341  * Return: 0 for success and non-zero for failure
1342  */
1343 static A_STATUS hif_sdio_remove(void *context, void *hif_handle)
1344 {
1345 	HIF_ENTER();
1346 
1347 	if (!scn) {
1348 		QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR,
1349 			  "Global SDIO context is NULL");
1350 		return A_ERROR;
1351 	}
1352 
1353 	athdiag_procfs_remove();
1354 
1355 #ifndef TARGET_DUMP_FOR_NON_QC_PLATFORM
1356 	iounmap(scn->ramdump_base);
1357 #endif
1358 
1359 	HIF_EXIT();
1360 
1361 	return 0;
1362 }
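
/**
 * hif_device_removed() - handle removal of the SDIO function
 * @func: pointer to the sdio_func being removed
 *
 * Return: None.
 */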
1363 static void hif_device_removed(struct sdio_func *func)
1364 {
1365 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1366 	struct hif_sdio_dev *device;
1367 	int i;
1368 
1369 	AR_DEBUG_ASSERT(func != NULL);
1370 	HIF_ENTER();
1371 	device = get_hif_device(func);
1372 
1373 	if (device->power_config == HIF_DEVICE_POWER_CUT) {
1374 		device->func = NULL;    /* func is freed by the mmc stack */
1375 		return;         /* Just return for cut-off mode */
1376 	}
1377 	for (i = 0; i < MAX_HIF_DEVICES; ++i) {
1378 		if (hif_devices[i] == device)
1379 			hif_devices[i] = NULL;
1380 	}
1381 
1382 	hif_sdio_remove(device->claimed_ctx, device);
1383 
1384 	hif_mask_interrupt(device);
1385 
1386 	if (device->is_disabled)
1387 		device->is_disabled = false;
1388 	else
1389 		status = hif_disable_func(device, func,
1390 					  reset_sdio_on_unload ? true : false);
1391 
1392 
1393 	del_hif_device(device);
1394 	if (status != QDF_STATUS_SUCCESS)
1395 		AR_DEBUG_PRINTF(ATH_DEBUG_WARN,
1396 		  ("%s: Unable to disable sdio func\n",
1397 		   __func__));
1398 
1399 	HIF_EXIT();
1400 }
1401 
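/**
 * add_hif_device() - allocate a hif device for an SDIO function
 * @func: pointer to sdio_func
 *
 * Also allocates the DMA bounce buffer, when enabled, and stores the hif
 * device as the function's driver data.
 *
 * Return: pointer to the new hif device, or NULL on allocation failure.
 */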
1402 static struct hif_sdio_dev *add_hif_device(struct sdio_func *func)
1403 {
1404 	struct hif_sdio_dev *hifdevice = NULL;
1405 	int ret = 0;
1406 
1407 	HIF_ENTER();
1408 	AR_DEBUG_ASSERT(func != NULL);
1409 	hifdevice = (struct hif_sdio_dev *) qdf_mem_malloc(sizeof(
1410 							struct hif_sdio_dev));
1411 	AR_DEBUG_ASSERT(hifdevice != NULL);
1412 	if (hifdevice == NULL) {
1413 		AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("Alloc hif device fail\n"));
1414 		return NULL;
1415 	}
1416 #if HIF_USE_DMA_BOUNCE_BUFFER
1417 	hifdevice->dma_buffer = qdf_mem_malloc(HIF_DMA_BUFFER_SIZE);
1418 	AR_DEBUG_ASSERT(hifdevice->dma_buffer != NULL);
1419 	if (hifdevice->dma_buffer == NULL) {
1420 		qdf_mem_free(hifdevice);
1421 		AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("Alloc dma buffer fail\n"));
1422 		return NULL;
1423 	}
1424 #endif
1425 	hifdevice->func = func;
1426 	hifdevice->power_config = HIF_DEVICE_POWER_UP;
1427 	hifdevice->device_state = HIF_DEVICE_STATE_ON;
1428 	ret = hif_sdio_set_drvdata(func, hifdevice);
1429 	HIF_EXIT("status %d", ret);
1430 
1431 	return hifdevice;
1432 }
1433 
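/**
 * del_hif_device() - free a hif device and its DMA bounce buffer
 * @device: pointer to hif device structure
 *
 * Return: None.
 */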
1434 static void del_hif_device(struct hif_sdio_dev *device)
1435 {
1436 	AR_DEBUG_ASSERT(device != NULL);
1437 	AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
1438 			("%s: deleting hif device 0x%pK\n",
1439 				__func__, device));
1440 	if (device->dma_buffer != NULL)
1441 		qdf_mem_free(device->dma_buffer);
1442 
1443 	qdf_mem_free(device);
1444 }
1445 
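/**
 * hif_attach_htc() - register the HTC layer callbacks with the hif device
 * @device: pointer to hif device structure
 * @callbacks: HTC callbacks to register
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE if already attached.
 */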
1446 QDF_STATUS hif_attach_htc(struct hif_sdio_dev *device,
1447 				struct htc_callbacks *callbacks)
1448 {
1449 	if (device->htc_callbacks.context != NULL)
1450 		/* already in use! */
1451 		return QDF_STATUS_E_FAILURE;
1452 	device->htc_callbacks = *callbacks;
1453 
1454 	return QDF_STATUS_SUCCESS;
1455 }
1456 
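/**
 * hif_detach_htc() - clear the HTC layer callbacks from the hif device
 * @hif_ctx: pointer to the opaque hif context
 *
 * Return: None.
 */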
1457 void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
1458 {
1459 	struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_ctx);
1460 	struct hif_sdio_dev *hif_device = scn->hif_handle;
1461 
1462 	qdf_mem_zero(&hif_device->htc_callbacks,
1463 			  sizeof(hif_device->htc_callbacks));
1464 }
1465 
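/**
 * func0_cmd52_write_byte() - write one byte to a function-0 (CCCR) register
 * @card: MMC card hosting the SDIO function
 * @address: function-0 register address
 * @byte: value to write
 *
 * Issues a raw CMD52 (IO_RW_DIRECT) write through the MMC core.
 *
 * Return: 0 on success, error number otherwise.
 */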
1466 int func0_cmd52_write_byte(struct mmc_card *card,
1467 			   unsigned int address,
1468 			   unsigned char byte)
1469 {
1470 	struct mmc_command io_cmd;
1471 	unsigned long arg;
1472 	int status = 0;
1473 
1474 	memset(&io_cmd, 0, sizeof(io_cmd));
1475 	SDIO_SET_CMD52_WRITE_ARG(arg, 0, address, byte);
1476 	io_cmd.opcode = SD_IO_RW_DIRECT;
1477 	io_cmd.arg = arg;
1478 	io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
1479 	status = mmc_wait_for_cmd(card->host, &io_cmd, 0);
1480 
1481 	if (status)
1482 		HIF_ERROR("%s: mmc_wait_for_cmd returned %d",
1483 			  __func__, status);
1484 
1485 	return status;
1486 }
1487 
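/**
 * func0_cmd52_read_byte() - read one byte from a function-0 (CCCR) register
 * @card: MMC card hosting the SDIO function
 * @address: function-0 register address
 * @byte: filled with the value read on success
 *
 * Return: 0 on success, error number otherwise.
 */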
1488 int func0_cmd52_read_byte(struct mmc_card *card,
1489 			  unsigned int address,
1490 			  unsigned char *byte)
1491 {
1492 	struct mmc_command io_cmd;
1493 	unsigned long arg;
1494 	int32_t err;
1495 
1496 	memset(&io_cmd, 0, sizeof(io_cmd));
1497 	SDIO_SET_CMD52_READ_ARG(arg, 0, address);
1498 	io_cmd.opcode = SD_IO_RW_DIRECT;
1499 	io_cmd.arg = arg;
1500 	io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
1501 
1502 	err = mmc_wait_for_cmd(card->host, &io_cmd, 0);
1503 
1504 	if ((!err) && (byte))
1505 		*byte = io_cmd.resp[0] & 0xFF;
1506 
1507 	if (err)
1508 		HIF_ERROR("%s: mmc_wait_for_cmd returned %d",
1509 			  __func__, err);
1510 
1511 	return err;
1512 }
1513 
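/**
 * hif_dump_cccr() - log the card's CCCR registers (0x00 through 0x16)
 * @hif_device: pointer to hif device structure
 *
 * Return: None.
 */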
1514 void hif_dump_cccr(struct hif_sdio_dev *hif_device)
1515 {
1516 	unsigned int i;
1517 	uint8_t cccr_val;
1518 	uint32_t err;
1519 
1520 	HIF_ERROR("%s: Enter", __func__);
1521 
1522 	if (!hif_device || !hif_device->func ||
1523 				!hif_device->func->card) {
1524 		HIF_ERROR("%s: incorrect input", __func__);
1525 		return;
1526 	}
1527 
1528 	for (i = 0; i <= 0x16; i++) {
1529 		err = func0_cmd52_read_byte(hif_device->func->card,
1530 						i, &cccr_val);
1531 		if (err)
1532 			HIF_ERROR("%s:Reading CCCR 0x%02X failed: %d",
1533 				  __func__, i, (unsigned int)err);
1534 		else
1535 			HIF_ERROR("%X(%X) ", i, (unsigned int)cccr_val);
1536 	}
1537 
1538 	HIF_ERROR("%s: Exit", __func__);
1539 }
1540 
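/**
 * hif_sdio_device_inserted() - wrapper for the SDIO probe path
 * @ol_sc: HIF device context
 * @dev: pointer to the struct device of the SDIO function
 * @id: matching sdio_device_id entry
 *
 * Return: 0 on success, error number otherwise.
 */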
1541 int hif_sdio_device_inserted(struct hif_softc *ol_sc,
1542 			     struct device *dev,
1543 			     const struct sdio_device_id *id)
1544 {
1545 	struct sdio_func *func = dev_to_sdio_func(dev);
1546 	int status = 0;
1547 
1548 	HIF_ERROR("%s: Enter", __func__);
1549 	status = hif_device_inserted(ol_sc, func, id);
1550 	HIF_ERROR("%s: Exit", __func__);
1551 
1552 	return status;
1553 }
1554 
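/**
 * hif_sdio_device_removed() - wrapper for the SDIO remove path
 * @func: pointer to the sdio_func being removed
 *
 * Return: None.
 */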
1555 void hif_sdio_device_removed(struct sdio_func *func)
1556 {
1557 	hif_device_removed(func);
1558 }
1559