xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/sdio/transfer/adma.c (revision 70a19e16789e308182f63b15c75decec7bf0b342)
/*
 * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <qdf_lock.h>
#include "adma.h"
#include "hif_sdio_internal.h"
#include "pld_sdio.h"
#include "if_sdio.h"

/**
 * hif_dev_get_fifo_address() - get the fifo addresses for dma
 * @pdev: SDIO HIF object
 * @c: FIFO address config pointer
 * @config_len: config length
 *
 * Return: 0 for success, non-zero for error
 */
int hif_dev_get_fifo_address(struct hif_sdio_dev *pdev,
			     void *c,
			     uint32_t config_len)
{
	/* SDIO AL handles DMA Addresses */
	return 0;
}

/**
 * hif_dev_get_block_size() - get the adma block size for dma
 * @config: block size config pointer
 *
 * Return: NONE
 */
void hif_dev_get_block_size(void *config)
{
	/* TODO Get block size used by AL Layer in Mission ROM Mode */
	*((uint32_t *)config) = HIF_BLOCK_SIZE; /* QCN_SDIO_MROM_BLK_SZ TODO */
}

/**
 * hif_dev_configure_pipes() - configure pipes
 * @pdev: SDIO HIF object
 * @func: sdio function object
 *
 * Return: 0 for success, non-zero for error
 */
int hif_dev_configure_pipes(struct hif_sdio_dev *pdev, struct sdio_func *func)
{
	/* SDIO AL Configures SDIO Channels */
	return 0;
}

/**
 * hif_dev_set_mailbox_swap() - Set the mailbox swap
 * @pdev: The HIF layer object
 *
 * Return: none
 */
void hif_dev_set_mailbox_swap(struct hif_sdio_dev *pdev)
{
	/* SDIO AL doesn't use mailbox architecture */
}

/**
 * hif_dev_get_mailbox_swap() - Get the mailbox swap setting
 * @pdev: The HIF layer object
 *
 * Return: true or false
 */
bool hif_dev_get_mailbox_swap(struct hif_sdio_dev *pdev)
{
	/* SDIO AL doesn't use mailbox architecture */
	return false;
}

/**
 * hif_dev_dsr_handler() - Synchronous interrupt handler
 * @context: hif send context
 *
 * Return: 0 for success and non-zero for failure
 */
QDF_STATUS hif_dev_dsr_handler(void *context)
{
	/* SDIO AL handles interrupts */
	return QDF_STATUS_SUCCESS;
}

/**
 * hif_dev_map_service_to_pipe() - map a service id to its ul/dl pipes
 * @pdev: SDIO HIF object
 * @svc: service id
 * @ul_pipe: uplink pipe id
 * @dl_pipe: downlink pipe id
 *
 * Return: 0 on success, error value on invalid map
 */
QDF_STATUS hif_dev_map_service_to_pipe(struct hif_sdio_dev *pdev, uint16_t svc,
				       uint8_t *ul_pipe, uint8_t *dl_pipe)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	switch (svc) {
	case HTT_DATA_MSG_SVC:
		*dl_pipe = 2;
		*ul_pipe = 3;
		break;

	case HTC_CTRL_RSVD_SVC:
	case HTC_RAW_STREAMS_SVC:
		*dl_pipe = 0;
		*ul_pipe = 1;
		break;

	case WMI_DATA_BE_SVC:
	case WMI_DATA_BK_SVC:
	case WMI_DATA_VI_SVC:
	case WMI_DATA_VO_SVC:
		*dl_pipe = 2;
		*ul_pipe = 3;
		break;

	case WMI_CONTROL_SVC:
		*dl_pipe = 0;
		*ul_pipe = 1;
		break;

	default:
		hif_err("Invalid service: %d", svc);
		status = QDF_STATUS_E_INVAL;
		break;
	}
	return status;
}
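
/*
 * Resulting mapping (see hif_dev_map_service_to_pipe() above):
 *
 *   Service                              UL pipe   DL pipe
 *   HTC_CTRL_RSVD / HTC_RAW_STREAMS         1         0
 *   WMI_CONTROL                             1         0
 *   WMI_DATA_BE/BK/VI/VO                    3         2
 *   HTT_DATA_MSG                            3         2
 */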

/**
 * hif_sdio_bus_configure() - configure the bus
 * @hif_sc: pointer to the hif context.
 *
 * Return: 0 for success. nonzero for failure.
 */
int hif_sdio_bus_configure(struct hif_softc *hif_sc)
{
	struct pld_wlan_enable_cfg cfg;
	enum pld_driver_mode mode;
	uint32_t con_mode = hif_get_conparam(hif_sc);

	if (con_mode == QDF_GLOBAL_FTM_MODE)
		mode = PLD_FTM;
	else if (con_mode == QDF_GLOBAL_COLDBOOT_CALIB_MODE)
		mode = PLD_COLDBOOT_CALIBRATION;
	else if (QDF_IS_EPPING_ENABLED(con_mode))
		mode = PLD_EPPING;
	else
		mode = PLD_MISSION;

	return pld_wlan_enable(hif_sc->qdf_dev->dev, &cfg, mode);
}

/**
 * hif_dev_setup_device() - Set up device-specific items required by the HIF
 * @pdev: HIF layer object
 *
 * Return: 0 on success, error otherwise
 */
int hif_dev_setup_device(struct hif_sdio_device *pdev)
{
	hif_dev_get_block_size(&pdev->BlockSize);

	return 0;
}

/**
 * hif_dev_mask_interrupts() - Disable the interrupts in the device
 * @pdev: SDIO HIF Object
 *
 * Return: NONE
 */
void hif_dev_mask_interrupts(struct hif_sdio_device *pdev)
{
	/* SDIO AL Handles Interrupts */
}

/**
 * hif_dev_unmask_interrupts() - Enable the interrupts in the device
 * @pdev: SDIO HIF Object
 *
 * Return: NONE
 */
void hif_dev_unmask_interrupts(struct hif_sdio_device *pdev)
{
	/* SDIO AL Handles Interrupts */
}

/**
 * hif_dev_map_pipe_to_adma_chan() - maps pipe id to adma chan
 * @dev: The pointer to the hif device object
 * @pipeid: pipe index
 *
 * Return: adma channel handle
 */
struct sdio_al_channel_handle *
hif_dev_map_pipe_to_adma_chan(struct hif_sdio_device *dev, uint8_t pipeid)
{
	struct hif_sdio_dev *pdev = dev->HIFDevice;

	HIF_ENTER();

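	/* Pipe pair 0/1 shares ADMA channel 0; pipe pair 2/3 shares channel 1 */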
	if ((pipeid == 0) || (pipeid == 1))
		return pdev->al_chan[0];
	else if ((pipeid == 2) || (pipeid == 3))
		return pdev->al_chan[1];
	else
		return NULL;
}

/**
 * hif_dev_map_adma_chan_to_pipe() - map adma chan to htc pipe
 * @pdev: The pointer to the hif device object
 * @chan: channel number
 * @upload: boolean to decide upload or download
 *
 * Return: mapped pipe id, or an invalid pipe index for an unknown channel
 */
uint8_t hif_dev_map_adma_chan_to_pipe(struct hif_sdio_device *pdev,
				      uint8_t chan, bool upload)
{
	hif_info("chan: %u, %s", chan, upload ? "Upload" : "Download");

	if (chan == 0) /* chan 0 is mapped to HTT */
		return upload ? 1 : 0;
	else if (chan == 1) /* chan 1 is mapped to WMI */
		return upload ? 3 : 2;

	return (uint8_t)-1; /* invalid channel id */
}

/**
 * hif_get_send_address() - Get the transfer pipe address
 * @pdev: The pointer to the hif device object
 * @pipe: The pipe identifier
 * @addr: returned pipe address
 *
 * Return: 0 for success and non-zero for failure to map
 */
int hif_get_send_address(struct hif_sdio_device *pdev,
			 uint8_t pipe, unsigned long *addr)
{
	struct sdio_al_channel_handle *chan = NULL;

	if (!addr)
		return -EINVAL;

	*addr = 0;
	chan = hif_dev_map_pipe_to_adma_chan(pdev, pipe);

	if (!chan)
		return -EINVAL;

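	/*
	 * The returned "address" is really the ADMA channel handle;
	 * hif_read_write() casts it back to a channel pointer.
	 */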
	*addr = (unsigned long)chan;

	return 0;
}

/**
 * hif_fixup_write_param() - Tweak the address and length parameters
 * @pdev: The pointer to the hif device object
 * @req: request flags
 * @length: The length pointer
 * @addr: The addr pointer
 *
 * Return: None
 */
void hif_fixup_write_param(struct hif_sdio_dev *pdev, uint32_t req,
			   uint32_t *length, uint32_t *addr)
{
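	/* No fixup is applied here; the SDIO AL layer manages transfer
	 * addresses and lengths for ADMA.
	 */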
	HIF_ENTER();
	HIF_EXIT();
}

#define HIF_MAX_RX_Q_ALLOC 0 /* TODO */
#define HIF_RX_Q_ALLOC_THRESHOLD 100
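/**
 * hif_disable_func() - Disable the SDIO function and clean up HIF resources
 * @device: HIF SDIO device object
 * @func: SDIO function pointer
 * @reset: whether to reset the function while disabling it
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise
 */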
QDF_STATUS hif_disable_func(struct hif_sdio_dev *device,
			    struct sdio_func *func,
			    bool reset)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
#if HIF_MAX_RX_Q_ALLOC
	qdf_list_node_t *node;
	struct rx_q_entry *rx_q_elem;
#endif
	HIF_ENTER();

#if HIF_MAX_RX_Q_ALLOC
	qdf_spin_lock_irqsave(&device->rx_q_lock);

	for (; device->rx_q.count; ) {
		qdf_list_remove_back(&device->rx_q, &node);
		rx_q_elem = container_of(node, struct rx_q_entry, entry);
		if (rx_q_elem) {
			if (rx_q_elem->nbuf)
				qdf_nbuf_free(rx_q_elem->nbuf);
			qdf_mem_free(rx_q_elem);
		}
	}
	qdf_destroy_work(0, &device->rx_q_alloc_work);

	qdf_spin_unlock_irqrestore(&device->rx_q_lock);

	qdf_spinlock_destroy(&device->rx_q_lock);
#endif

	status = hif_sdio_func_disable(device, func, reset);
	if (status == QDF_STATUS_SUCCESS)
		device->is_disabled = true;

	cleanup_hif_scatter_resources(device);

	HIF_EXIT();

	return status;
}

/**
 * hif_enable_func() - Enable SDIO function
 *
 * @ol_sc: HIF object pointer
 * @device: HIF device pointer
 * @func: SDIO function pointer
 * @resume: If this is called from resume or probe
 *
 * Return: 0 in case of success, else error value
 */
QDF_STATUS hif_enable_func(struct hif_softc *ol_sc, struct hif_sdio_dev *device,
			   struct sdio_func *func, bool resume)
{
	QDF_STATUS ret = QDF_STATUS_SUCCESS;

	if (!device) {
		hif_err("HIF device is NULL");
		return QDF_STATUS_E_INVAL;
	}

	if (!resume)
		ret = hif_sdio_probe(ol_sc, func, device);

#if HIF_MAX_RX_Q_ALLOC
	if (!ret) {
		qdf_list_create(&device->rx_q, HIF_MAX_RX_Q_ALLOC);
		qdf_spinlock_create(&device->rx_q_lock);
		qdf_create_work(0, &device->rx_q_alloc_work,
				hif_sdio_rx_q_alloc, (void *)device);
		device->rx_q_alloc_work_scheduled = true;
		qdf_sched_work(0, &device->rx_q_alloc_work);
	}
#endif
	return ret;
}

/**
 * hif_sdio_get_nbuf() - Get a network buffer from the rx q
 * @dev: HIF device object
 * @buf_len: buffer length
 *
 * Return: NULL if out of buffers, else qdf_nbuf_t
 */
#if HIF_MAX_RX_Q_ALLOC
static qdf_nbuf_t hif_sdio_get_nbuf(struct hif_sdio_dev *dev, uint16_t buf_len)
{
	qdf_list_node_t *node;
	qdf_nbuf_t nbuf = NULL;
	qdf_list_t *q = &dev->rx_q;
	struct rx_q_entry *elem = NULL;

	/* TODO - Alloc nbuf based on buf_len */
	qdf_spin_lock_irqsave(&dev->rx_q_lock);

	if (q->count) {
		qdf_list_remove_front(q, &node);
		elem = qdf_container_of(node, struct rx_q_entry, entry);
		nbuf = elem->nbuf;
	} else {
		hif_err("no rx q elements");
	}

	if (q->count <= HIF_RX_Q_ALLOC_THRESHOLD &&
	    !dev->rx_q_alloc_work_scheduled) {
		dev->rx_q_alloc_work_scheduled = true;
		qdf_sched_work(0, &dev->rx_q_alloc_work);
	}

	qdf_spin_unlock_irqrestore(&dev->rx_q_lock);

	qdf_mem_free(elem);

	return nbuf;
}
#else
static qdf_nbuf_t hif_sdio_get_nbuf(struct hif_sdio_dev *dev, uint16_t buf_len)
{
	qdf_nbuf_t nbuf;

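	/* Fall back to the default RX buffer size when the caller does
	 * not know the payload length in advance.
	 */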
	if (!buf_len)
		buf_len = HIF_SDIO_RX_BUFFER_SIZE;

	nbuf = qdf_nbuf_alloc(NULL, buf_len, 0, 4, false);

	return nbuf;
}
#endif

/**
 * hif_sdio_rx_q_alloc() - Deferred work for pre-alloc rx q
 * @ctx: Pointer to context object
 *
 * Return: NONE
 */
#if HIF_MAX_RX_Q_ALLOC
void hif_sdio_rx_q_alloc(void *ctx)
{
	struct rx_q_entry *rx_q_elem;
	struct hif_sdio_dev *dev = (struct hif_sdio_dev *)ctx;
	unsigned int rx_q_count = dev->rx_q.count;

	HIF_ENTER();
	qdf_spin_lock_irqsave(&dev->rx_q_lock);

	for (; rx_q_count < dev->rx_q.max_size; rx_q_count++) {
		rx_q_elem = qdf_mem_malloc(sizeof(struct rx_q_entry));
		if (!rx_q_elem) {
			hif_err("Failed to alloc rx q elem");
			break;
		}

		/* TODO - Alloc nbuf based on payload_len in HTC Header */
		rx_q_elem->nbuf = qdf_nbuf_alloc(NULL, HIF_SDIO_RX_BUFFER_SIZE,
						 0, 4, false);
		if (!rx_q_elem->nbuf) {
			hif_err("Failed to alloc nbuf for rx");
			qdf_mem_free(rx_q_elem);
			break;
		}

		qdf_list_insert_back(&dev->rx_q, &rx_q_elem->entry);
	}
	dev->rx_q_alloc_work_scheduled = false;

	qdf_spin_unlock_irqrestore(&dev->rx_q_lock);
	HIF_EXIT();
}
#else
void hif_sdio_rx_q_alloc(void *ctx)
{
}
#endif

#include <linux/qcn_sdio_al.h>

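/*
 * SDIO AL channel definitions used for QCN7605. Each entry names a WLAN
 * channel exposed by the SDIO AL layer and hooks up the UL/DL transfer
 * completion and DL data-available callbacks defined later in this file.
 * client_data is filled in at registration time from the AL client handle.
 */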
struct sdio_al_channel_data qcn7605_chan[HIF_SDIO_MAX_AL_CHANNELS] = {
	{
		.name = "SDIO_AL_WLAN_CH0", /* HTT */
		.client_data = NULL, /* populate from client handle */
		.ul_xfer_cb = ul_xfer_cb,
		.dl_xfer_cb = dl_xfer_cb,
		.dl_data_avail_cb = dl_data_avail_cb,
		.dl_meta_data_cb = NULL
	},
	{
		.name = "SDIO_AL_WLAN_CH1", /* WMI */
		.client_data = NULL, /* populate from client handle */
		.ul_xfer_cb = ul_xfer_cb,
		.dl_xfer_cb = dl_xfer_cb,
		.dl_data_avail_cb = dl_data_avail_cb,
		.dl_meta_data_cb = NULL
	}
};

/**
 * hif_dev_register_channels() - Register transport layer channels
 * @dev: HIF device object
 * @func: SDIO function pointer
 *
 * Return: 0 on success, error value otherwise
 */
int hif_dev_register_channels(struct hif_sdio_dev *dev, struct sdio_func *func)
{
	int ret = 0;
	unsigned int chan;
	struct sdio_al_channel_data *chan_data[HIF_ADMA_MAX_CHANS];

	HIF_ENTER();

	dev->al_client = pld_sdio_get_sdio_al_client_handle(func);
	if (!dev->al_client) {
		hif_err("Failed to get sdio al handle");
		return -EINVAL;
	}

	if ((func->device & MANUFACTURER_ID_AR6K_BASE_MASK) ==
	    MANUFACTURER_ID_QCN7605_BASE) {
		dev->adma_chans_used = 2;
		qcn7605_chan[0].client_data = dev->al_client->client_data;
		qcn7605_chan[1].client_data = dev->al_client->client_data;
		chan_data[0] = &qcn7605_chan[0];
		chan_data[1] = &qcn7605_chan[1];
	} else {
		dev->adma_chans_used = 0;
	}

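	/* Register each channel with the SDIO AL layer and stash the HIF
	 * device in the channel's priv field for use by the transfer
	 * callbacks.
	 */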
	for (chan = 0; chan < dev->adma_chans_used; chan++) {
		dev->al_chan[chan] =
		pld_sdio_register_sdio_al_channel(dev->al_client,
						  chan_data[chan]);
		if (!dev->al_chan[chan] || IS_ERR(dev->al_chan[chan])) {
			ret = -EINVAL;
			hif_err("Channel registration failed");
		} else {
			dev->al_chan[chan]->priv = (void *)dev;
			hif_info("chan %s : id : %u",
				 chan_data[chan]->name,
				 dev->al_chan[chan]->channel_id);
		}
	}

	HIF_EXIT();

	return ret;
}

/**
 * hif_dev_unregister_channels() - Unregister transport layer channels
 * @dev: HIF device object
 * @func: SDIO Function pointer
 *
 * Return: None
 */
void hif_dev_unregister_channels(struct hif_sdio_dev *dev,
				 struct sdio_func *func)
{
	unsigned int chan;

	if (!dev) {
		hif_err("hif_sdio_dev is null");
		return;
	}

	for (chan = 0; chan < dev->adma_chans_used; chan++) {
		dev->al_chan[chan]->priv = NULL;
		pld_sdio_unregister_sdio_al_channel(dev->al_chan[chan]);
	}
}

/**
 * hif_read_write() - queue a read/write request
 * @dev: pointer to hif device structure
 * @sdio_al_ch_handle: address to read, actually channel pointer
 * @cbuffer: buffer to hold read/write data
 * @length: length to read/write
 * @request: read/write/sync/async request
 * @context: pointer to hold calling context
 *
 * Return: QDF_STATUS_SUCCESS or QDF_STATUS_E_PENDING on success,
 *         error status otherwise.
 */
QDF_STATUS
hif_read_write(struct hif_sdio_dev *dev,
	       unsigned long sdio_al_ch_handle,
	       char *cbuffer, uint32_t length,
	       uint32_t request, void *context)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct sdio_al_channel_handle *ch;
	struct bus_request *bus_req;
	enum sdio_al_dma_direction dir;
	struct hif_sdio_device *device;
	QDF_STATUS (*rx_comp)(void *, qdf_nbuf_t, uint8_t);
	qdf_nbuf_t nbuf;
	int ret = 0, payload_len = 0;
	unsigned char *buffer = (unsigned char *)cbuffer;

	if (!dev || !sdio_al_ch_handle) {
		hif_err("Device = %pK, addr = %lu", dev, sdio_al_ch_handle);
		return QDF_STATUS_E_INVAL;
	}

	if (!(request & HIF_ASYNCHRONOUS) &&
	    !(request & HIF_SYNCHRONOUS)) {
		hif_err("Invalid request mode: %d", request);
		return QDF_STATUS_E_INVAL;
	}

	/* SDIO r/w is not needed during suspend, so just return */
	if ((dev->is_suspend) &&
	    (dev->power_config == HIF_DEVICE_POWER_CUT)) {
		hif_info("skip in suspend");
		return QDF_STATUS_SUCCESS;
	}

	ch = (struct sdio_al_channel_handle *)sdio_al_ch_handle;

	bus_req = hif_allocate_bus_request(dev);
	if (!bus_req) {
		hif_err("Bus alloc failed");
		return QDF_STATUS_E_FAILURE;
	}

	bus_req->address = sdio_al_ch_handle;
	bus_req->length = length;
	bus_req->request = request;
	bus_req->context = context;
	bus_req->buffer = buffer;

	/* Request SDIO AL to do transfer */
	dir = (request & HIF_SDIO_WRITE) ? SDIO_AL_TX : SDIO_AL_RX;

	if (request & HIF_SYNCHRONOUS) {
		ret = sdio_al_queue_transfer(ch,
					     dir,
					     bus_req->buffer,
					     bus_req->length,
					     1); /* higher priority */
		if (ret) {
			status = QDF_STATUS_E_FAILURE;
			hif_err("SYNC REQ failed ret: %d", ret);
		} else {
			status = QDF_STATUS_SUCCESS;
		}

		hif_free_bus_request(dev, bus_req);

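		/*
		 * For a successful synchronous read, the HTC header in the
		 * buffer gives the payload length; size the nbuf accordingly
		 * and hand it to the HTC rx completion handler. Use the local
		 * buffer pointer since bus_req has already been returned to
		 * the free list.
		 */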
		if ((status == QDF_STATUS_SUCCESS) && (dir == SDIO_AL_RX)) {
			nbuf = (qdf_nbuf_t)context;
			payload_len = HTC_GET_FIELD(buffer,
						    HTC_FRAME_HDR,
						    PAYLOADLEN);
			qdf_nbuf_set_pktlen(nbuf, payload_len + HTC_HDR_LENGTH);
			device = (struct hif_sdio_device *)dev->htc_context;
			rx_comp = device->hif_callbacks.rxCompletionHandler;
			rx_comp(device->hif_callbacks.Context, nbuf, 0);
		}
	} else {
		ret = sdio_al_queue_transfer_async(ch,
						   dir,
						   bus_req->buffer,
						   bus_req->length,
						   1, /* higher priority */
						   (void *)bus_req);
		if (ret) {
			status = QDF_STATUS_E_FAILURE;
			hif_err("ASYNC REQ fail ret: %d for len: %d ch: %d",
				ret, length, ch->channel_id);
			hif_free_bus_request(dev, bus_req);
		} else {
			status = QDF_STATUS_E_PENDING;
		}
	}
	return status;
}

/**
 * ul_xfer_cb() - Completion call back for asynchronous transfer
 * @ch_handle: The sdio al channel handle
 * @result: The result of the operation
 * @ctx: pointer to request context
 *
 * Return: None
 */
void ul_xfer_cb(struct sdio_al_channel_handle *ch_handle,
		struct sdio_al_xfer_result *result,
		void *ctx)
{
	struct bus_request *req = (struct bus_request *)ctx;
	struct hif_sdio_dev *dev;

	if (!ch_handle || !result) {
		hif_err("Invalid args");
		qdf_assert_always(0);
		return;
	}

	dev = (struct hif_sdio_dev *)ch_handle->priv;

	if (result->xfer_status) {
		req->status = QDF_STATUS_E_FAILURE;
		hif_err("ASYNC Tx failed status: %d", result->xfer_status);
	} else {
		req->status = QDF_STATUS_SUCCESS;
	}

	dev->htc_callbacks.rw_compl_handler(req->context, req->status);

	hif_free_bus_request(dev, req);
}

/**
 * dl_data_avail_cb() - Called when data is available on a channel
 * @ch_handle: The sdio al channel handle
 * @len: The len of data available to download
 *
 * Use the asynchronous method of transfer. This will help in
 * completing READ in the transfer done callback later which
 * runs in sdio al thread context. If we do the synchronous
 * transfer here, the thread context won't be available and
 * perhaps a new thread may be required here.
 *
 * Return: None
 */
void dl_data_avail_cb(struct sdio_al_channel_handle *ch_handle,
		      unsigned int len)
{
	struct hif_sdio_dev *dev;
	unsigned int chan;
	qdf_nbuf_t nbuf;

	if (!ch_handle || !len) {
		hif_err("Invalid args %u", len);
		qdf_assert_always(0);
		return;
	}

	dev = (struct hif_sdio_dev *)ch_handle->priv;
	chan = ch_handle->channel_id;

	if (chan > HIF_SDIO_MAX_AL_CHANNELS) {
		hif_err("Invalid Ch ID %d", chan);
		return;
	}

	/* allocate a buffer for reading the data from the chip.
	 * Note that this is raw, unparsed buffer and will be
	 * processed in the transfer done callback.
	 */
	/* TODO, use global buffer instead of runtime allocations */
	nbuf = qdf_nbuf_alloc(NULL, len, 0, 4, false);

	if (!nbuf) {
		hif_err("Unable to alloc netbuf %u bytes", len);
		return;
	}

	hif_read_write(dev, (unsigned long)ch_handle, nbuf->data, len,
		       HIF_RD_ASYNC_BLOCK_FIX, nbuf);
}

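/*
 * A block starting with the 0xbabababa pattern marks the end of valid HTC
 * frames in a received buffer (see the parsing loop in dl_xfer_cb()).
 * g_dbg_payload_len only tracks the largest HTC payload seen, for debug
 * logging.
 */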
#define is_pad_block(buf)	(*((uint32_t *)buf) == 0xbabababa)
uint16_t g_dbg_payload_len;

/**
 * dl_xfer_cb() - Called from the lower layer after a transfer is completed
 * @ch_handle: The sdio al channel handle
 * @result: The xfer result
 * @ctx: Context passed in the transfer queuing
 *
 * Return: None
 */
void dl_xfer_cb(struct sdio_al_channel_handle *ch_handle,
		struct sdio_al_xfer_result *result,
		void *ctx)
{
	unsigned char *buf;
	qdf_nbuf_t nbuf;
	uint32_t len;
	uint16_t payload_len = 0;
	struct hif_sdio_dev *dev;
	struct hif_sdio_device *device;
	struct bus_request *bus_req = (struct bus_request *)ctx;
	QDF_STATUS (*rx_completion)(void *, qdf_nbuf_t, uint8_t);

	if (!bus_req) {
		hif_err("Bus Req NULL!!!");
		qdf_assert_always(0);
		return;
	}

	if (!ch_handle || !result) {
		hif_err("Invalid args %pK %pK", ch_handle, result);
		qdf_assert_always(0);
		return;
	}

	dev = (struct hif_sdio_dev *)ch_handle->priv;
	if (result->xfer_status) {
		hif_err("ASYNC Rx failed %d", result->xfer_status);
		qdf_nbuf_free((qdf_nbuf_t)bus_req->context);
		hif_free_bus_request(dev, bus_req);
		return;
	}

	device = (struct hif_sdio_device *)dev->htc_context;
	rx_completion = device->hif_callbacks.rxCompletionHandler;

	buf = (unsigned char *)result->buf_addr;
	len = (unsigned int)result->xfer_len;
	while (len >= sizeof(HTC_FRAME_HDR)) {
		if (is_pad_block(buf)) {
			/* End of Rx Buffer */
			break;
		}

		if (HTC_GET_FIELD(buf, HTC_FRAME_HDR, ENDPOINTID) >=
		    ENDPOINT_MAX) {
			hif_err("Invalid endpoint id: %u",
				HTC_GET_FIELD(buf, HTC_FRAME_HDR, ENDPOINTID));
			break;
		}

		/* Copy the HTC frame to the alloc'd packet buffer */
		payload_len = HTC_GET_FIELD(buf, HTC_FRAME_HDR, PAYLOADLEN);
		payload_len = qdf_le16_to_cpu(payload_len);
		if (!payload_len) {
			hif_err("Invalid Payload len %d bytes", payload_len);
			break;
		}
		if (payload_len > g_dbg_payload_len) {
			g_dbg_payload_len = payload_len;
			hif_err("Max Rx HTC Payload = %d", g_dbg_payload_len);
		}

		nbuf = hif_sdio_get_nbuf(dev, payload_len + HTC_HEADER_LEN);
		if (!nbuf) {
			hif_err("Failed to alloc rx buffer");
			break;
		}

		/* Check if payload fits in skb */
		if (qdf_nbuf_tailroom(nbuf) < payload_len + HTC_HEADER_LEN) {
			hif_err("Payload + HTC_HDR %d > skb tailroom %d",
				(payload_len + 8),
				qdf_nbuf_tailroom(nbuf));
			qdf_nbuf_free(nbuf);
			break;
		}

		qdf_mem_copy((uint8_t *)qdf_nbuf_data(nbuf), buf,
			     payload_len + HTC_HEADER_LEN);

		qdf_nbuf_put_tail(nbuf, payload_len + HTC_HDR_LENGTH);

		rx_completion(device->hif_callbacks.Context, nbuf,
			      0); /* don't care, not used */

		len -= payload_len + HTC_HDR_LENGTH;
		buf += payload_len + HTC_HDR_LENGTH;
	}

	qdf_nbuf_free((qdf_nbuf_t)bus_req->context);
	hif_free_bus_request(dev, bus_req);
}
864