xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/sdio/transfer/adma.c (revision 11f5a63a6cbdda84849a730de22f0a71e635d58c)
1 /*
2  * Copyright (c) 2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_lock.h>
20 #include "adma.h"
21 #include "hif_sdio_internal.h"
22 #include "pld_sdio.h"
23 #include "if_sdio.h"
24 
/**
 * hif_dev_get_fifo_address() - get the fifo addresses for dma
 * @pdev: SDIO HIF object
 * @c: FIFO address config pointer (filled in by callers that use mailboxes)
 * @config_len: length in bytes of the config structure pointed to by @c
 *
 * On the ADMA transfer path the SDIO AL layer owns the DMA addressing,
 * so there is nothing to fill in here; the function exists to satisfy
 * the common HIF-SDIO interface.
 *
 * Return: 0 for success, non-zero for error
 */
int hif_dev_get_fifo_address(struct hif_sdio_dev *pdev,
			     void *c,
			     uint32_t config_len)
{
	/* SDIO AL handles DMA Addresses */
	return 0;
}
39 
/**
 * hif_dev_get_block_size() - get the adma block size for dma
 * @config: out-parameter; treated as a uint32_t pointer and set to the
 *          SDIO block size used for transfers
 *
 * Return: NONE
 */
void hif_dev_get_block_size(void *config)
{
	/* TODO Get block size used by AL Layer in Mission ROM Mode */
	*((uint32_t *)config) = HIF_BLOCK_SIZE; /* QCN_SDIO_MROM_BLK_SZ TODO */
}
51 
/**
 * hif_dev_configure_pipes() - configure pipes
 * @pdev: SDIO HIF object
 * @func: sdio function object
 *
 * Pipe/channel configuration is owned by the SDIO AL layer on this
 * transfer path; this stub keeps the common HIF-SDIO interface intact.
 *
 * Return: 0 for success, non-zero for error
 */
int hif_dev_configure_pipes(struct hif_sdio_dev *pdev, struct sdio_func *func)
{
	/* SDIO AL Configures SDIO Channels */
	return 0;
}
64 
/**
 * hif_dev_set_mailbox_swap() - Set the mailbox swap
 * @pdev: The HIF layer object
 *
 * No-op on the ADMA path: SDIO AL does not use the mailbox architecture,
 * so there is no swap setting to apply.
 *
 * Return: none
 */
void hif_dev_set_mailbox_swap(struct hif_sdio_dev *pdev)
{
	/* SDIO AL doesn't use mailbox architecture */
}
74 
/**
 * hif_dev_get_mailbox_swap() - Get the mailbox swap setting
 * @pdev: The HIF layer object
 *
 * Always false on the ADMA path: SDIO AL does not use the mailbox
 * architecture.
 *
 * Return: true or false
 */
bool hif_dev_get_mailbox_swap(struct hif_sdio_dev *pdev)
{
	/* SDIO AL doesn't use mailbox architecture */
	return false;
}
85 
/**
 * hif_dev_dsr_handler() - Synchronous interrupt handler
 * @context: hif send context
 *
 * Interrupt handling is owned by the SDIO AL layer, so this handler has
 * nothing to do and always reports success.
 *
 * Return: QDF_STATUS_SUCCESS always
 */
QDF_STATUS hif_dev_dsr_handler(void *context)
{
	/* SDIO AL handles interrupts */
	return QDF_STATUS_SUCCESS;
}
98 
99 /**
100  * hif_dev_map_service_to_pipe() - maps ul/dl pipe to service id.
101  * @pDev: SDIO HIF object
102  * @ServiceId: sevice index
103  * @ULPipe: uplink pipe id
104  * @DLPipe: down-linklink pipe id
105  *
106  * Return: 0 on success, error value on invalid map
107  */
108 QDF_STATUS hif_dev_map_service_to_pipe(struct hif_sdio_dev *pdev, uint16_t svc,
109 				       uint8_t *ul_pipe, uint8_t *dl_pipe)
110 {
111 	QDF_STATUS status = QDF_STATUS_SUCCESS;
112 
113 	switch (svc) {
114 	case HTT_DATA_MSG_SVC:
115 		*dl_pipe = 2;
116 		*ul_pipe = 3;
117 		break;
118 
119 	case HTC_CTRL_RSVD_SVC:
120 	case HTC_RAW_STREAMS_SVC:
121 		*dl_pipe = 0;
122 		*ul_pipe = 1;
123 		break;
124 
125 	case WMI_DATA_BE_SVC:
126 	case WMI_DATA_BK_SVC:
127 	case WMI_DATA_VI_SVC:
128 	case WMI_DATA_VO_SVC:
129 		*dl_pipe = 2;
130 		*ul_pipe = 3;
131 		break;
132 
133 	case WMI_CONTROL_SVC:
134 		*dl_pipe = 0;
135 		*ul_pipe = 1;
136 		break;
137 
138 	default:
139 		HIF_ERROR("%s: Err : Invalid service (%d)",
140 			  __func__, svc);
141 		status = QDF_STATUS_E_INVAL;
142 		break;
143 	}
144 	return status;
145 }
146 
147 /**
148  * hif_bus_configure() - configure the bus
149  * @hif_sc: pointer to the hif context.
150  *
151  * return: 0 for success. nonzero for failure.
152  */
153 int hif_sdio_bus_configure(struct hif_softc *hif_sc)
154 {
155 	struct pld_wlan_enable_cfg cfg;
156 	enum pld_driver_mode mode;
157 	uint32_t con_mode = hif_get_conparam(hif_sc);
158 
159 	if (con_mode == QDF_GLOBAL_FTM_MODE)
160 		mode = PLD_FTM;
161 	else if (con_mode == QDF_GLOBAL_COLDBOOT_CALIB_MODE)
162 		mode = PLD_COLDBOOT_CALIBRATION;
163 	else if (QDF_IS_EPPING_ENABLED(con_mode))
164 		mode = PLD_EPPING;
165 	else
166 		mode = PLD_MISSION;
167 
168 	return pld_wlan_enable(hif_sc->qdf_dev->dev, &cfg, mode);
169 }
170 
/**
 * hif_dev_setup_device() - Setup device specific stuff here required for hif
 * @pdev: HIF layer object
 *
 * Currently only queries the SDIO block size into pdev->BlockSize.
 *
 * Return: 0 on success, error otherwise
 */
int hif_dev_setup_device(struct hif_sdio_device *pdev)
{
	hif_dev_get_block_size(&pdev->BlockSize);

	return 0;
}
182 
/**
 * hif_dev_mask_interrupts() - Disable the interrupts in the device
 * @pdev: SDIO HIF Object
 *
 * No-op on the ADMA path; interrupt masking is owned by SDIO AL.
 *
 * Return: NONE
 */
void hif_dev_mask_interrupts(struct hif_sdio_device *pdev)
{
	/* SDIO AL Handles Interrupts */
}
192 
/**
 * hif_dev_unmask_interrupts() - Enable the interrupts in the device
 * @pdev: SDIO HIF Object
 *
 * No-op on the ADMA path; interrupt unmasking is owned by SDIO AL.
 *
 * Return: NONE
 */
void hif_dev_unmask_interrupts(struct hif_sdio_device *pdev)
{
	/* SDIO AL Handles Interrupts */
}
202 
203 /**
204  * hif_dev_map_pipe_to_adma_chan() - maps pipe id to adma chan
205  * @pdev: The pointer to the hif device object
206  * @pipeid: pipe index
207  *
208  * Return: adma channel handle
209  */
210 struct sdio_al_channel_handle *hif_dev_map_pipe_to_adma_chan
211 (
212 struct hif_sdio_device *dev,
213 uint8_t pipeid
214 )
215 {
216 	struct hif_sdio_dev *pdev = dev->HIFDevice;
217 
218 	HIF_ENTER();
219 
220 	if ((pipeid == 0) || (pipeid == 1))
221 		return pdev->al_chan[0];
222 	else if ((pipeid == 2) || (pipeid == 3))
223 		return pdev->al_chan[1];
224 	else
225 		return NULL;
226 }
227 
228 /**
229  * hif_dev_map_adma_chan_to_pipe() - map adma chan to htc pipe
230  * @pdev: The pointer to the hif device object
231  * @chan: channel number
232  * @upload: boolean to decide upload or download
233  *
234  * Return: Invalid pipe index
235  */
236 uint8_t hif_dev_map_adma_chan_to_pipe(struct hif_sdio_device *pdev,
237 				      uint8_t chan, bool upload)
238 {
239 	HIF_INFO("%s: chan: %u, %s", __func__, chan,
240 		 upload ? "Upload" : "Download");
241 
242 	if (chan == 0) /* chan 0 is mapped to HTT */
243 		return upload ? 1 : 0;
244 	else if (chan == 1) /* chan 1 is mapped to WMI */
245 		return upload ? 3 : 2;
246 
247 	return (uint8_t)-1; /* invalid channel id */
248 }
249 
250 /**
251  * hif_get_send_address() - Get the transfer pipe address
252  * @pdev: The pointer to the hif device object
253  * @pipe: The pipe identifier
254  *
255  * Return 0 for success and non-zero for failure to map
256  */
257 int hif_get_send_address(struct hif_sdio_device *pdev,
258 			 uint8_t pipe, unsigned long *addr)
259 {
260 	struct sdio_al_channel_handle *chan = NULL;
261 
262 	if (!addr)
263 		return -EINVAL;
264 
265 	*addr = 0;
266 	chan = hif_dev_map_pipe_to_adma_chan(pdev, pipe);
267 
268 	if (!chan)
269 		return -EINVAL;
270 
271 	*addr = (unsigned long)chan;
272 
273 	return 0;
274 }
275 
/**
 * hif_fixup_write_param() - Tweak the address and length parameters
 * @pdev: The pointer to the hif device object
 * @req: the request flags for this transfer (unused on the ADMA path)
 * @length: The length pointer
 * @addr: The addr pointer
 *
 * No-op on the ADMA path; SDIO AL needs no address/length fixups.
 *
 * Return: None
 */
void hif_fixup_write_param(struct hif_sdio_dev *pdev, uint32_t req,
			   uint32_t *length, uint32_t *addr)
{
	HIF_ENTER();
	HIF_EXIT();
}
290 
#define HIF_MAX_RX_Q_ALLOC 0 /* TODO */
#define HIF_RX_Q_ALLOC_THRESHOLD 100
/**
 * hif_disable_func() - Disable the SDIO function
 * @device: HIF device object
 * @func: SDIO function pointer
 * @reset: whether to reset the function while disabling
 *
 * Drains the rx pre-allocation queue (only when HIF_MAX_RX_Q_ALLOC is
 * non-zero; it is currently 0, so that code is compiled out), disables
 * the SDIO function and releases scatter resources.
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise
 */
QDF_STATUS hif_disable_func(struct hif_sdio_dev *device,
			    struct sdio_func *func,
			    bool reset)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
#if HIF_MAX_RX_Q_ALLOC
	qdf_list_node_t *node;
	struct rx_q_entry *rx_q_elem;
#endif
	HIF_ENTER();

#if HIF_MAX_RX_Q_ALLOC
	qdf_spin_lock_irqsave(&device->rx_q_lock);

	/* Free every queued rx element together with its nbuf */
	for (; device->rx_q.count; ) {
		qdf_list_remove_back(&device->rx_q, &node);
		rx_q_elem = container_of(node, struct rx_q_entry, entry);
		if (rx_q_elem) {
			if (rx_q_elem->nbuf)
				qdf_nbuf_free(rx_q_elem->nbuf);
			qdf_mem_free(rx_q_elem);
		}
	}
	/* NOTE(review): destroying the work item while holding the rx_q
	 * spinlock looks suspect (it may sleep/flush) - confirm if this
	 * path is ever enabled.
	 */
	qdf_destroy_work(0, &device->rx_q_alloc_work);

	qdf_spin_unlock_irqrestore(&device->rx_q_lock);

	qdf_spinlock_destroy(&device->rx_q_lock);
#endif

	status = hif_sdio_func_disable(device, func, reset);
	if (status == QDF_STATUS_SUCCESS)
		device->is_disabled = true;

	cleanup_hif_scatter_resources(device);

	HIF_EXIT();

	return status;
}
333 
/**
 * hif_enable_func() - Enable SDIO function
 *
 * @ol_sc: HIF object pointer
 * @device: HIF device pointer
 * @func: SDIO function pointer
 * @resume: If this is called from resume or probe
 *
 * Runs the probe path when not resuming; optionally sets up the rx
 * pre-allocation queue (compiled out while HIF_MAX_RX_Q_ALLOC is 0).
 *
 * Return: QDF_STATUS_SUCCESS in case of success, else error value
 */
QDF_STATUS hif_enable_func(struct hif_softc *ol_sc, struct hif_sdio_dev *device,
			   struct sdio_func *func, bool resume)
{
	/* NOTE(review): ret is an int but is seeded with a QDF_STATUS and
	 * returned as one; hif_sdio_probe() presumably returns a
	 * compatible code - confirm against its definition.
	 */
	int ret = QDF_STATUS_SUCCESS;

	if (!device) {
		HIF_ERROR("%s: HIF device is NULL", __func__);
		return QDF_STATUS_E_INVAL;
	}

	if (!resume)
		ret = hif_sdio_probe(ol_sc, func, device);

#if HIF_MAX_RX_Q_ALLOC
	if (!ret) {
		qdf_list_create(&device->rx_q, HIF_MAX_RX_Q_ALLOC);
		qdf_spinlock_create(&device->rx_q_lock);
		qdf_create_work(0, &device->rx_q_alloc_work,
				hif_sdio_rx_q_alloc, (void *)device);
		device->rx_q_alloc_work_scheduled = true;
		qdf_sched_work(0, &device->rx_q_alloc_work);
	}
#endif
	return ret;
}
369 
/**
 * hif_sdio_get_nbuf() - Get a network buffer from the rx q
 * @dev: HIF device object
 * @buf_len: requested buffer length; 0 selects HIF_SDIO_RX_BUFFER_SIZE
 *           (only honored in the non-queued variant below)
 *
 * Return: NULL if out of buffers, else qdf_nbuf_t
 */
#if HIF_MAX_RX_Q_ALLOC
static qdf_nbuf_t hif_sdio_get_nbuf(struct hif_sdio_dev *dev, uint16_t buf_len)
{
	qdf_list_node_t *node;
	qdf_nbuf_t nbuf = NULL;
	qdf_list_t *q = &dev->rx_q;
	struct rx_q_entry *elem = NULL;

	/* TODO - Alloc nbuf based on buf_len */
	qdf_spin_lock_irqsave(&dev->rx_q_lock);

	if (q->count) {
		qdf_list_remove_front(q, &node);
		elem = qdf_container_of(node, struct rx_q_entry, entry);
		nbuf = elem->nbuf;
	} else {
		HIF_ERROR("%s: no rx q elements", __func__);
	}

	/* Kick the refill worker when the queue runs low */
	if (q->count <= HIF_RX_Q_ALLOC_THRESHOLD &&
	    !dev->rx_q_alloc_work_scheduled) {
		dev->rx_q_alloc_work_scheduled = true;
		qdf_sched_work(0, &dev->rx_q_alloc_work);
	}

	qdf_spin_unlock_irqrestore(&dev->rx_q_lock);

	/* elem may be NULL here; qdf_mem_free is assumed NULL-safe */
	qdf_mem_free(elem);

	return nbuf;
}
#else
static qdf_nbuf_t hif_sdio_get_nbuf(struct hif_sdio_dev *dev, uint16_t buf_len)
{
	qdf_nbuf_t nbuf;

	if (!buf_len)
		buf_len = HIF_SDIO_RX_BUFFER_SIZE;

	nbuf = qdf_nbuf_alloc(NULL, buf_len, 0, 4, false);

	return nbuf;
}
#endif
/**
 * hif_sdio_rx_q_alloc() - Deferred work for pre-alloc rx q
 * @ctx: Pointer to context object (struct hif_sdio_dev)
 *
 * Tops the rx queue back up to its max size. Compiled out while
 * HIF_MAX_RX_Q_ALLOC is 0.
 *
 * Return: NONE
 */
#if HIF_MAX_RX_Q_ALLOC
void hif_sdio_rx_q_alloc(void *ctx)
{
	struct rx_q_entry *rx_q_elem;
	struct hif_sdio_dev *dev = (struct hif_sdio_dev *)ctx;
	unsigned int rx_q_count = dev->rx_q.count;

	HIF_ENTER();
	/* NOTE(review): qdf_mem_malloc/qdf_nbuf_alloc are called while
	 * holding an irqsave spinlock below; confirm these are atomic-safe
	 * before ever enabling this path.
	 */
	qdf_spin_lock_irqsave(&dev->rx_q_lock);

	for (; rx_q_count < dev->rx_q.max_size; rx_q_count++) {
		rx_q_elem = qdf_mem_malloc(sizeof(struct rx_q_entry));
		if (!rx_q_elem) {
			HIF_ERROR("%s: failed to alloc rx q elem", __func__);
			break;
		}

		/* TODO - Alloc nbuf based on payload_len in HTC Header */
		rx_q_elem->nbuf = qdf_nbuf_alloc(NULL, HIF_SDIO_RX_BUFFER_SIZE,
						 0, 4, false);
		if (!rx_q_elem->nbuf) {
			HIF_ERROR("%s: failed to alloc nbuf for rx", __func__);
			qdf_mem_free(rx_q_elem);
			break;
		}

		qdf_list_insert_back(&dev->rx_q, &rx_q_elem->entry);
	}
	dev->rx_q_alloc_work_scheduled = false;

	qdf_spin_unlock_irqrestore(&dev->rx_q_lock);
	HIF_EXIT();
}
#else
void hif_sdio_rx_q_alloc(void *ctx)
{
}
#endif
464 
465 #include <linux/qcn_sdio_al.h>
466 
/* SDIO AL channel table for QCN7605: channel 0 carries HTT traffic,
 * channel 1 carries WMI. client_data is populated at registration time
 * from the AL client handle (see hif_dev_register_channels()).
 */
struct sdio_al_channel_data qcn7605_chan[HIF_SDIO_MAX_AL_CHANNELS] = {
	{
		.name = "SDIO_AL_WLAN_CH0", /* HTT */
		.client_data = NULL, /* populate from client handle */
		.ul_xfer_cb = ul_xfer_cb,
		.dl_xfer_cb = dl_xfer_cb,
		.dl_data_avail_cb = dl_data_avail_cb,
		.dl_meta_data_cb = NULL
	},
	{
		.name = "SDIO_AL_WLAN_CH1", /* WMI */
		.client_data = NULL, /* populate from client handle */
		.ul_xfer_cb = ul_xfer_cb,
		.dl_xfer_cb = dl_xfer_cb,
		.dl_data_avail_cb = dl_data_avail_cb,
		.dl_meta_data_cb = NULL
	}
};
485 
486 /**
487  * hif_dev_register_channels()- Register transport layer channels
488  * @dev  : HIF device object
489  * @func : SDIO function pointer
490  *
491  * Return : success on configuration, else failure
492  */
493 int hif_dev_register_channels(struct hif_sdio_dev *dev, struct sdio_func *func)
494 {
495 	int ret = 0;
496 	unsigned int chan;
497 	struct sdio_al_channel_data *chan_data[HIF_ADMA_MAX_CHANS];
498 
499 	HIF_ENTER();
500 
501 	dev->al_client = pld_sdio_get_sdio_al_client_handle(func);
502 	if (ret || !dev->al_client) {
503 		HIF_ERROR("%s: Failed to get get sdio al handle", __func__);
504 		return ret;
505 	}
506 
507 	if ((func->device & MANUFACTURER_ID_AR6K_BASE_MASK) ==
508 	    MANUFACTURER_ID_QCN7605_BASE) {
509 		dev->adma_chans_used = 2;
510 		qcn7605_chan[0].client_data = dev->al_client->client_data;
511 		qcn7605_chan[1].client_data = dev->al_client->client_data;
512 		chan_data[0] = &qcn7605_chan[0];
513 		chan_data[1] = &qcn7605_chan[1];
514 	} else {
515 		dev->adma_chans_used = 0;
516 	}
517 
518 	for (chan = 0; chan < dev->adma_chans_used; chan++) {
519 		dev->al_chan[chan] =
520 		pld_sdio_register_sdio_al_channel(dev->al_client,
521 						  chan_data[chan]);
522 		if (!dev->al_chan[chan] || IS_ERR(dev->al_chan[chan])) {
523 			ret = -EINVAL;
524 			HIF_ERROR("%s: Channel registration failed", __func__);
525 		} else {
526 			dev->al_chan[chan]->priv = (void *)dev;
527 			HIF_INFO("%s: chan %s : id : %u", __func__,
528 				 chan_data[chan]->name,
529 				 dev->al_chan[chan]->channel_id);
530 		}
531 	}
532 
533 	HIF_EXIT();
534 
535 	return ret;
536 }
537 
538 /**
539  * hif_dev_unregister_channels()- Register transport layer channels
540  * @dev  : HIF device object
541  * @func : SDIO Function pointer
542  *
543  * Return : None
544  */
545 void hif_dev_unregister_channels(struct hif_sdio_dev *dev,
546 				 struct sdio_func *func)
547 {
548 	unsigned int chan;
549 
550 	if (!dev) {
551 		HIF_ERROR("%s: hif_sdio_dev is null", __func__);
552 		return;
553 	}
554 
555 	for (chan = 0; chan < dev->adma_chans_used; chan++) {
556 		dev->al_chan[chan]->priv = NULL;
557 		pld_sdio_unregister_sdio_al_channel(dev->al_chan[chan]);
558 	}
559 }
560 
561 /**
562  * hif_read_write() - queue a read/write request
563  * @dev: pointer to hif device structure
564  * @address: address to read, actually channel pointer
565  * @buffer: buffer to hold read/write data
566  * @length: length to read/write
567  * @request: read/write/sync/async request
568  * @context: pointer to hold calling context
569  *
570  * Return: 0, pending  on success, error number otherwise.
571  */
572 QDF_STATUS
573 hif_read_write(struct hif_sdio_dev *dev,
574 	       unsigned long sdio_al_ch_handle,
575 	       char *cbuffer, uint32_t length,
576 	       uint32_t request, void *context)
577 {
578 	QDF_STATUS status = QDF_STATUS_SUCCESS;
579 	struct sdio_al_channel_handle *ch;
580 	struct bus_request *bus_req;
581 	enum sdio_al_dma_direction dir;
582 	struct hif_sdio_device *device;
583 	QDF_STATUS (*rx_comp)(void *, qdf_nbuf_t, uint8_t);
584 	qdf_nbuf_t nbuf;
585 	int ret = 0, payload_len = 0;
586 	unsigned char *buffer = (unsigned char *)cbuffer;
587 
588 	if (!dev || !sdio_al_ch_handle) {
589 		HIF_ERROR("%s: device = %pK, addr = %lu", __func__,
590 			  dev, sdio_al_ch_handle);
591 		return QDF_STATUS_E_INVAL;
592 	}
593 
594 	if (!(request & HIF_ASYNCHRONOUS) &&
595 	    !(request & HIF_SYNCHRONOUS)) {
596 		HIF_ERROR("%s: Invalid request mode", __func__);
597 		return QDF_STATUS_E_INVAL;
598 	}
599 
600 	/*sdio r/w action is not needed when suspend, so just return */
601 	if ((dev->is_suspend) &&
602 	    (dev->power_config == HIF_DEVICE_POWER_CUT)) {
603 		HIF_INFO("%s: skip in suspend", __func__);
604 		return QDF_STATUS_SUCCESS;
605 	}
606 
607 	ch = (struct sdio_al_channel_handle *)sdio_al_ch_handle;
608 
609 	bus_req = hif_allocate_bus_request(dev);
610 	if (!bus_req) {
611 		HIF_ERROR("%s: Bus alloc failed", __func__);
612 		return QDF_STATUS_E_FAILURE;
613 	}
614 
615 	bus_req->address = sdio_al_ch_handle;
616 	bus_req->length = length;
617 	bus_req->request = request;
618 	bus_req->context = context;
619 	bus_req->buffer = buffer;
620 
621 	/* Request SDIO AL to do transfer */
622 	dir = (request & HIF_SDIO_WRITE) ? SDIO_AL_TX : SDIO_AL_RX;
623 
624 	if (request & HIF_SYNCHRONOUS) {
625 		ret = sdio_al_queue_transfer(ch,
626 					     dir,
627 					     bus_req->buffer,
628 					     bus_req->length,
629 					     1); /* higher priority */
630 		if (ret) {
631 			status = QDF_STATUS_E_FAILURE;
632 			HIF_ERROR("%s: SYNC REQ failed ret=%d", __func__, ret);
633 		} else {
634 			status = QDF_STATUS_SUCCESS;
635 		}
636 
637 		hif_free_bus_request(dev, bus_req);
638 
639 		if ((status == QDF_STATUS_SUCCESS) && (dir == SDIO_AL_RX)) {
640 			nbuf = (qdf_nbuf_t)context;
641 			payload_len = HTC_GET_FIELD(bus_req->buffer,
642 						    HTC_FRAME_HDR,
643 						    PAYLOADLEN);
644 			qdf_nbuf_set_pktlen(nbuf, payload_len + HTC_HDR_LENGTH);
645 			device = (struct hif_sdio_device *)dev->htc_context;
646 			rx_comp = device->hif_callbacks.rxCompletionHandler;
647 			rx_comp(device->hif_callbacks.Context, nbuf, 0);
648 		}
649 	} else {
650 		ret = sdio_al_queue_transfer_async(ch,
651 						   dir,
652 						   bus_req->buffer,
653 						   bus_req->length,
654 						   1, /* higher priority */
655 						   (void *)bus_req);
656 		if (ret) {
657 			status = QDF_STATUS_E_FAILURE;
658 			HIF_ERROR("%s: ASYNC REQ fail ret=%d for len=%d ch=%d",
659 				  __func__, ret, length, ch->channel_id);
660 			hif_free_bus_request(dev, bus_req);
661 		} else {
662 			status = QDF_STATUS_E_PENDING;
663 		}
664 	}
665 	return status;
666 }
667 
668 /**
669  * ul_xfer_cb() - Completion call back for asynchronous transfer
670  * @ch_handle: The sdio al channel handle
671  * @result: The result of the operation
672  * @context: pointer to request context
673  *
674  * Return: None
675  */
676 void ul_xfer_cb(struct sdio_al_channel_handle *ch_handle,
677 		struct sdio_al_xfer_result *result,
678 		void *ctx)
679 {
680 	struct bus_request *req = (struct bus_request *)ctx;
681 	struct hif_sdio_dev *dev;
682 
683 	if (!ch_handle || !result) {
684 		HIF_ERROR("%s: Invalid args", __func__);
685 		qdf_assert_always(0);
686 		return;
687 	}
688 
689 	dev = (struct hif_sdio_dev *)ch_handle->priv;
690 
691 	if (result->xfer_status) {
692 		req->status = QDF_STATUS_E_FAILURE;
693 		HIF_ERROR("%s: ASYNC Tx failed status=%d", __func__,
694 			  result->xfer_status);
695 	} else {
696 		req->status = QDF_STATUS_SUCCESS;
697 	}
698 
699 	dev->htc_callbacks.rw_compl_handler(req->context, req->status);
700 
701 	hif_free_bus_request(dev, req);
702 }
703 
704 /**
705  * dl_data_avail_cb() - Called when data is available on a channel
706  * @ch_handle: The sdio al channel handle
707  * @len: The len of data available to download
708  *
709  * Return: None
710  */
711 /* Use the asynchronous method of transfer. This will help in
712  * completing READ in the transfer done callback later which
713  * runs in sdio al thread context. If we do the syncronous
714  * transfer here, the thread context won't be available and
715  * perhaps a new thread may be required here.
716  */
717 void dl_data_avail_cb(struct sdio_al_channel_handle *ch_handle,
718 		      unsigned int len)
719 {
720 	struct hif_sdio_dev *dev;
721 	unsigned int chan;
722 	qdf_nbuf_t nbuf;
723 
724 	if (!ch_handle || !len) {
725 		HIF_ERROR("%s: Invalid args %u", __func__, len);
726 		qdf_assert_always(0);
727 		return;
728 	}
729 
730 	dev = (struct hif_sdio_dev *)ch_handle->priv;
731 	chan = ch_handle->channel_id;
732 
733 	if (chan > HIF_SDIO_MAX_AL_CHANNELS) {
734 		HIF_ERROR("%s: Invalid Ch ID %d", __func__, chan);
735 		return;
736 	}
737 
738 	/* allocate a buffer for reading the data from the chip.
739 	 * Note that this is raw, unparsed buffer and will be
740 	 * processed in the transfer done callback.
741 	 */
742 	/* TODO, use global buffer instead of runtime allocations */
743 	nbuf = qdf_nbuf_alloc(NULL, len, 0, 4, false);
744 
745 	if (!nbuf) {
746 		HIF_ERROR("%s: Unable to alloc netbuf %u bytes", __func__, len);
747 		return;
748 	}
749 
750 	hif_read_write(dev, (unsigned long)ch_handle, nbuf->data, len,
751 		       HIF_RD_ASYNC_BLOCK_FIX, nbuf);
752 }
753 
754 #define is_pad_block(buf)	(*((uint32_t *)buf) == 0xbabababa)
755 uint16_t g_dbg_payload_len;
756 
757 /**
758  * dl_xfer_cb() - Call from lower layer after transfer is completed
759  * @ch_handle: The sdio al channel handle
760  * @result: The xfer result
761  * @ctx: Context passed in the transfer queuing
762  *
763  * Return: None
764  */
765 void dl_xfer_cb(struct sdio_al_channel_handle *ch_handle,
766 		struct sdio_al_xfer_result *result,
767 		void *ctx)
768 {
769 	unsigned char *buf;
770 	qdf_nbuf_t nbuf;
771 	uint32_t len;
772 	uint16_t payload_len = 0;
773 	struct hif_sdio_dev *dev;
774 	struct hif_sdio_device *device;
775 	struct bus_request *bus_req = (struct bus_request *)ctx;
776 	QDF_STATUS (*rx_completion)(void *, qdf_nbuf_t, uint8_t);
777 
778 	if (!bus_req) {
779 		HIF_ERROR("%s: Bus Req NULL!!!", __func__);
780 		qdf_assert_always(0);
781 		return;
782 	}
783 
784 	if (!ch_handle || !result) {
785 		HIF_ERROR("%s: Invalid args %pK %pK", __func__,
786 			  ch_handle, result);
787 		qdf_assert_always(0);
788 		return;
789 	}
790 
791 	dev = (struct hif_sdio_dev *)ch_handle->priv;
792 	if (result->xfer_status) {
793 		HIF_ERROR("%s: ASYNC Rx failed %d", __func__,
794 			  result->xfer_status);
795 		qdf_nbuf_free((qdf_nbuf_t)bus_req->context);
796 		hif_free_bus_request(dev, bus_req);
797 		return;
798 	}
799 
800 	device = (struct hif_sdio_device *)dev->htc_context;
801 	rx_completion = device->hif_callbacks.rxCompletionHandler;
802 
803 	buf = (unsigned char *)result->buf_addr;
804 	len = (unsigned int)result->xfer_len;
805 
806 	while (len >= sizeof(HTC_FRAME_HDR)) {
807 		if (is_pad_block(buf)) {
808 			/* End of Rx Buffer */
809 			break;
810 		}
811 
812 		if (HTC_GET_FIELD(buf, HTC_FRAME_HDR, ENDPOINTID) >=
813 		    ENDPOINT_MAX) {
814 			HIF_ERROR("%s: invalid endpoint id: %u", __func__,
815 				  HTC_GET_FIELD(buf, HTC_FRAME_HDR,
816 						ENDPOINTID));
817 			break;
818 		}
819 
820 		/* Copy the HTC frame to the alloc'd packet buffer */
821 		payload_len = HTC_GET_FIELD(buf, HTC_FRAME_HDR, PAYLOADLEN);
822 		payload_len = qdf_le16_to_cpu(payload_len);
823 		if (!payload_len) {
824 			HIF_ERROR("%s:Invalid Payload len %d bytes", __func__,
825 				  payload_len);
826 			break;
827 		}
828 		if (payload_len > g_dbg_payload_len) {
829 			g_dbg_payload_len = payload_len;
830 			HIF_ERROR("Max Rx HTC Payload = %d", g_dbg_payload_len);
831 		}
832 
833 		nbuf = hif_sdio_get_nbuf(dev, payload_len + HTC_HEADER_LEN);
834 		if (!nbuf) {
835 			HIF_ERROR("%s: failed to alloc rx buffer", __func__);
836 			break;
837 		}
838 
839 		/* Check if payload fits in skb */
840 		if (qdf_nbuf_tailroom(nbuf) < payload_len + HTC_HEADER_LEN) {
841 			HIF_ERROR("%s: Payload + HTC_HDR %d > skb tailroom %d",
842 				  __func__, (payload_len + 8),
843 				  qdf_nbuf_tailroom(nbuf));
844 			qdf_nbuf_free(nbuf);
845 			break;
846 		}
847 
848 		qdf_mem_copy((uint8_t *)qdf_nbuf_data(nbuf), buf,
849 			     payload_len + HTC_HEADER_LEN);
850 
851 		qdf_nbuf_put_tail(nbuf, payload_len + HTC_HDR_LENGTH);
852 
853 		rx_completion(device->hif_callbacks.Context, nbuf,
854 			      0); /* don't care, not used */
855 
856 		len -= payload_len + HTC_HDR_LENGTH;
857 		buf += payload_len + HTC_HDR_LENGTH;
858 	}
859 
860 	qdf_nbuf_free((qdf_nbuf_t)bus_req->context);
861 	hif_free_bus_request(dev, bus_req);
862 }
863