xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/sdio/transfer/adma.c (revision 97f44cd39e4ff816eaa1710279d28cf6b9e65ad9)
1 /*
2  * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_lock.h>
20 #include "adma.h"
21 #include "hif_sdio_internal.h"
22 #include "pld_sdio.h"
23 #include "if_sdio.h"
24 
25 /**
26  * hif_dev_get_fifo_address() - get the fifo addresses for dma
27  * @pdev:  SDIO HIF object
28  * @c : FIFO address config pointer
29  *
30  * Return : 0 for success, non-zero for error
31  */
32 int hif_dev_get_fifo_address(struct hif_sdio_dev *pdev,
33 			     void *c,
34 			     uint32_t config_len)
35 {
36 	/* SDIO AL handles DMA Addresses */
37 	return 0;
38 }
39 
40 /**
41  * hif_dev_get_block_size() - get the adma block size for dma
42  * @config : block size config pointer
43  *
44  * Return : NONE
45  */
46 void hif_dev_get_block_size(void *config)
47 {
48 	/* TODO Get block size used by AL Layer in Mission ROM Mode */
49 	*((uint32_t *)config) = HIF_BLOCK_SIZE; /* QCN_SDIO_MROM_BLK_SZ TODO */
50 }
51 
52 /**
53  * hif_dev_configure_pipes() - configure pipes
54  * @pdev: SDIO HIF object
55  * @func: sdio function object
56  *
57  * Return : 0 for success, non-zero for error
58  */
59 int hif_dev_configure_pipes(struct hif_sdio_dev *pdev, struct sdio_func *func)
60 {
61 	/* SDIO AL Configures SDIO Channels */
62 	return 0;
63 }
64 
/**
 * hif_dev_set_mailbox_swap() - Set the mailbox swap
 * @pdev: The HIF layer object
 *
 * Return: none
 */
void hif_dev_set_mailbox_swap(struct hif_sdio_dev *pdev)
{
	/* No-op: the SDIO AL transport does not use mailbox architecture */
}
74 
/**
 * hif_dev_get_mailbox_swap() - Get the mailbox swap setting
 * @pdev: The HIF layer object
 *
 * Return: always false; the SDIO AL transport does not use mailboxes
 */
bool hif_dev_get_mailbox_swap(struct hif_sdio_dev *pdev)
{
	return false;
}
85 
86 /**
87  * hif_dev_dsr_handler() - Synchronous interrupt handler
88  *
89  * @context: hif send context
90  *
91  * Return: 0 for success and non-zero for failure
92  */
93 QDF_STATUS hif_dev_dsr_handler(void *context)
94 {
95 	/* SDIO AL handles interrupts */
96 	return QDF_STATUS_SUCCESS;
97 }
98 
99 /**
100  * hif_dev_map_service_to_pipe() - maps ul/dl pipe to service id.
101  * @pDev: SDIO HIF object
102  * @ServiceId: sevice index
103  * @ULPipe: uplink pipe id
104  * @DLPipe: down-linklink pipe id
105  *
106  * Return: 0 on success, error value on invalid map
107  */
108 QDF_STATUS hif_dev_map_service_to_pipe(struct hif_sdio_dev *pdev, uint16_t svc,
109 				       uint8_t *ul_pipe, uint8_t *dl_pipe)
110 {
111 	QDF_STATUS status = QDF_STATUS_SUCCESS;
112 
113 	switch (svc) {
114 	case HTT_DATA_MSG_SVC:
115 		*dl_pipe = 2;
116 		*ul_pipe = 3;
117 		break;
118 
119 	case HTC_CTRL_RSVD_SVC:
120 	case HTC_RAW_STREAMS_SVC:
121 		*dl_pipe = 0;
122 		*ul_pipe = 1;
123 		break;
124 
125 	case WMI_DATA_BE_SVC:
126 	case WMI_DATA_BK_SVC:
127 	case WMI_DATA_VI_SVC:
128 	case WMI_DATA_VO_SVC:
129 		*dl_pipe = 2;
130 		*ul_pipe = 3;
131 		break;
132 
133 	case WMI_CONTROL_SVC:
134 		*dl_pipe = 0;
135 		*ul_pipe = 1;
136 		break;
137 
138 	default:
139 		hif_err("Invalid service: %d", svc);
140 		status = QDF_STATUS_E_INVAL;
141 		break;
142 	}
143 	return status;
144 }
145 
146 /**
147  * hif_bus_configure() - configure the bus
148  * @hif_sc: pointer to the hif context.
149  *
150  * return: 0 for success. nonzero for failure.
151  */
152 int hif_sdio_bus_configure(struct hif_softc *hif_sc)
153 {
154 	struct pld_wlan_enable_cfg cfg;
155 	enum pld_driver_mode mode;
156 	uint32_t con_mode = hif_get_conparam(hif_sc);
157 
158 	if (con_mode == QDF_GLOBAL_FTM_MODE)
159 		mode = PLD_FTM;
160 	else if (con_mode == QDF_GLOBAL_COLDBOOT_CALIB_MODE)
161 		mode = PLD_COLDBOOT_CALIBRATION;
162 	else if (QDF_IS_EPPING_ENABLED(con_mode))
163 		mode = PLD_EPPING;
164 	else
165 		mode = PLD_MISSION;
166 
167 	return pld_wlan_enable(hif_sc->qdf_dev->dev, &cfg, mode);
168 }
169 
170 /** hif_dev_setup_device() - Setup device specific stuff here required for hif
171  * @pdev : HIF layer object
172  *
173  * return 0 on success, error otherwise
174  */
175 int hif_dev_setup_device(struct hif_sdio_device *pdev)
176 {
177 	hif_dev_get_block_size(&pdev->BlockSize);
178 
179 	return 0;
180 }
181 
/**
 * hif_dev_mask_interrupts() - Disable the interrupts in the device
 * @pdev: SDIO HIF Object
 *
 * Return: NONE
 */
void hif_dev_mask_interrupts(struct hif_sdio_device *pdev)
{
	/* No-op: interrupt masking is owned by the SDIO AL layer */
}
191 
/**
 * hif_dev_unmask_interrupts() - Enable the interrupts in the device
 * @pdev: SDIO HIF Object
 *
 * Return: NONE
 */
void hif_dev_unmask_interrupts(struct hif_sdio_device *pdev)
{
	/* No-op: interrupt unmasking is owned by the SDIO AL layer */
}
201 
202 /**
203  * hif_dev_map_pipe_to_adma_chan() - maps pipe id to adma chan
204  * @pdev: The pointer to the hif device object
205  * @pipeid: pipe index
206  *
207  * Return: adma channel handle
208  */
209 struct sdio_al_channel_handle *hif_dev_map_pipe_to_adma_chan
210 (
211 struct hif_sdio_device *dev,
212 uint8_t pipeid
213 )
214 {
215 	struct hif_sdio_dev *pdev = dev->HIFDevice;
216 
217 	HIF_ENTER();
218 
219 	if ((pipeid == 0) || (pipeid == 1))
220 		return pdev->al_chan[0];
221 	else if ((pipeid == 2) || (pipeid == 3))
222 		return pdev->al_chan[1];
223 	else
224 		return NULL;
225 }
226 
227 /**
228  * hif_dev_map_adma_chan_to_pipe() - map adma chan to htc pipe
229  * @pdev: The pointer to the hif device object
230  * @chan: channel number
231  * @upload: boolean to decide upload or download
232  *
233  * Return: Invalid pipe index
234  */
235 uint8_t hif_dev_map_adma_chan_to_pipe(struct hif_sdio_device *pdev,
236 				      uint8_t chan, bool upload)
237 {
238 	hif_info("chan: %u, %s", chan, upload ? "Upload" : "Download");
239 
240 	if (chan == 0) /* chan 0 is mapped to HTT */
241 		return upload ? 1 : 0;
242 	else if (chan == 1) /* chan 1 is mapped to WMI */
243 		return upload ? 3 : 2;
244 
245 	return (uint8_t)-1; /* invalid channel id */
246 }
247 
248 /**
249  * hif_get_send_address() - Get the transfer pipe address
250  * @pdev: The pointer to the hif device object
251  * @pipe: The pipe identifier
252  *
253  * Return 0 for success and non-zero for failure to map
254  */
255 int hif_get_send_address(struct hif_sdio_device *pdev,
256 			 uint8_t pipe, unsigned long *addr)
257 {
258 	struct sdio_al_channel_handle *chan = NULL;
259 
260 	if (!addr)
261 		return -EINVAL;
262 
263 	*addr = 0;
264 	chan = hif_dev_map_pipe_to_adma_chan(pdev, pipe);
265 
266 	if (!chan)
267 		return -EINVAL;
268 
269 	*addr = (unsigned long)chan;
270 
271 	return 0;
272 }
273 
274 /**
275  * hif_fixup_write_param() - Tweak the address and length parameters
276  * @pdev: The pointer to the hif device object
277  * @length: The length pointer
278  * @addr: The addr pointer
279  *
280  * Return: None
281  */
282 void hif_fixup_write_param(struct hif_sdio_dev *pdev, uint32_t req,
283 			   uint32_t *length, uint32_t *addr)
284 {
285 	HIF_ENTER();
286 	HIF_EXIT();
287 }
288 
/* Non-zero enables the pre-allocated rx queue paths; currently compiled out */
#define HIF_MAX_RX_Q_ALLOC 0 /* TODO */
/* Refill threshold for the pre-allocated rx queue */
#define HIF_RX_Q_ALLOC_THRESHOLD 100
/**
 * hif_disable_func() - Disable the SDIO function
 * @device: HIF SDIO device object
 * @func: SDIO function pointer
 * @reset: whether to reset the function while disabling
 *
 * Drains and frees the pre-allocated rx queue (when compiled in),
 * disables the SDIO function and cleans up scatter resources.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF error value otherwise
 */
QDF_STATUS hif_disable_func(struct hif_sdio_dev *device,
			    struct sdio_func *func,
			    bool reset)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
#if HIF_MAX_RX_Q_ALLOC
	qdf_list_node_t *node;
	struct rx_q_entry *rx_q_elem;
#endif
	HIF_ENTER();

#if HIF_MAX_RX_Q_ALLOC
	qdf_spin_lock_irqsave(&device->rx_q_lock);

	/* Drain every queued rx element and free its network buffer */
	for (; device->rx_q.count; ) {
		qdf_list_remove_back(&device->rx_q, &node);
		rx_q_elem = container_of(node, struct rx_q_entry, entry);
		if (rx_q_elem) {
			if (rx_q_elem->nbuf)
				qdf_nbuf_free(rx_q_elem->nbuf);
			qdf_mem_free(rx_q_elem);
		}
	}
	/* NOTE(review): destroying the work item while holding the rx_q
	 * spinlock with IRQs saved looks suspect (may sleep) -- confirm
	 * before ever compiling this path in.
	 */
	qdf_destroy_work(0, &device->rx_q_alloc_work);

	qdf_spin_unlock_irqrestore(&device->rx_q_lock);

	qdf_spinlock_destroy(&device->rx_q_lock);
#endif

	status = hif_sdio_func_disable(device, func, reset);
	if (status == QDF_STATUS_SUCCESS)
		device->is_disabled = true;

	cleanup_hif_scatter_resources(device);

	HIF_EXIT();

	return status;
}
331 
332 /**
333  * hif_enable_func() - Enable SDIO function
334  *
335  * @ol_sc: HIF object pointer
336  * @device: HIF device pointer
337  * @sdio_func: SDIO function pointer
338  * @resume: If this is called from resume or probe
339  *
340  * Return: 0 in case of success, else error value
341  */
342 QDF_STATUS hif_enable_func(struct hif_softc *ol_sc, struct hif_sdio_dev *device,
343 			   struct sdio_func *func, bool resume)
344 {
345 	QDF_STATUS ret = QDF_STATUS_SUCCESS;
346 
347 	if (!device) {
348 		hif_err("HIF device is NULL");
349 		return QDF_STATUS_E_INVAL;
350 	}
351 
352 	if (!resume)
353 		ret = hif_sdio_probe(ol_sc, func, device);
354 
355 #if HIF_MAX_RX_Q_ALLOC
356 	if (!ret) {
357 		qdf_list_create(&device->rx_q, HIF_MAX_RX_Q_ALLOC);
358 		qdf_spinlock_create(&device->rx_q_lock);
359 		qdf_create_work(0, &device->rx_q_alloc_work,
360 				hif_sdio_rx_q_alloc, (void *)device);
361 		device->rx_q_alloc_work_scheduled = true;
362 		qdf_sched_work(0, &device->rx_q_alloc_work);
363 	}
364 #endif
365 	return ret;
366 }
367 
368 /**
369  * hif_sdio_get_net_buf() - Get a network buffer from the rx q
370  * @dev - HIF device object
371  *
372  * Return - NULL if out of buffers, else qdf_nbuf_t
373  */
374 #if HIF_MAX_RX_Q_ALLOC
375 static qdf_nbuf_t hif_sdio_get_nbuf(struct hif_sdio_dev *dev, uint16_t buf_len)
376 {
377 	qdf_list_node_t *node;
378 	qdf_nbuf_t nbuf = NULL;
379 	qdf_list_t *q = &dev->rx_q;
380 	struct rx_q_entry *elem = NULL;
381 
382 	/* TODO - Alloc nbuf based on buf_len */
383 	qdf_spin_lock_irqsave(&dev->rx_q_lock);
384 
385 	if (q->count) {
386 		qdf_list_remove_front(q, &node);
387 		elem = qdf_container_of(node, struct rx_q_entry, entry);
388 		nbuf = elem->nbuf;
389 	} else {
390 		hif_err("no rx q elements");
391 	}
392 
393 	if (q->count <= HIF_RX_Q_ALLOC_THRESHOLD &&
394 	    !dev->rx_q_alloc_work_scheduled) {
395 		dev->rx_q_alloc_work_scheduled = true;
396 		qdf_sched_work(0, &dev->rx_q_alloc_work);
397 	}
398 
399 	qdf_spin_unlock_irqrestore(&dev->rx_q_lock);
400 
401 	qdf_mem_free(elem);
402 
403 	return nbuf;
404 }
405 #else
406 static qdf_nbuf_t hif_sdio_get_nbuf(struct hif_sdio_dev *dev, uint16_t buf_len)
407 {
408 	qdf_nbuf_t nbuf;
409 
410 	if (!buf_len)
411 		buf_len = HIF_SDIO_RX_BUFFER_SIZE;
412 
413 	nbuf = qdf_nbuf_alloc(NULL, buf_len, 0, 4, false);
414 
415 	return nbuf;
416 }
417 #endif
418 /**
419  * hif_sdio_rx_q_alloc() - Deferred work for pre-alloc rx q
420  * @ctx - Pointer to context object
421  *
422  * Return NONE
423  */
424 #if HIF_MAX_RX_Q_ALLOC
425 void hif_sdio_rx_q_alloc(void *ctx)
426 {
427 	struct rx_q_entry *rx_q_elem;
428 	struct hif_sdio_dev *dev = (struct hif_sdio_dev *)ctx;
429 	unsigned int rx_q_count = dev->rx_q.count;
430 
431 	HIF_ENTER();
432 	qdf_spin_lock_irqsave(&dev->rx_q_lock);
433 
434 	for (; rx_q_count < dev->rx_q.max_size; rx_q_count++) {
435 		rx_q_elem = qdf_mem_malloc(sizeof(struct rx_q_entry));
436 		if (!rx_q_elem) {
437 			hif_err("Failed to alloc rx q elem");
438 			break;
439 		}
440 
441 		/* TODO - Alloc nbuf based on payload_len in HTC Header */
442 		rx_q_elem->nbuf = qdf_nbuf_alloc(NULL, HIF_SDIO_RX_BUFFER_SIZE,
443 						 0, 4, false);
444 		if (!rx_q_elem->nbuf) {
445 			hif_err("Failed to alloc nbuf for rx");
446 			qdf_mem_free(rx_q_elem);
447 			break;
448 		}
449 
450 		qdf_list_insert_back(&dev->rx_q, &rx_q_elem->entry);
451 	}
452 	dev->rx_q_alloc_work_scheduled = false;
453 
454 	qdf_spin_unlock_irqrestore(&dev->rx_q_lock);
455 	HIF_EXIT();
456 }
457 #else
458 void hif_sdio_rx_q_alloc(void *ctx)
459 {
460 }
461 #endif
462 
463 #include <linux/qcn_sdio_al.h>
464 
/* ADMA channel descriptors for QCN7605; client_data is populated from
 * the SDIO AL client handle at registration time.
 */
struct sdio_al_channel_data qcn7605_chan[HIF_SDIO_MAX_AL_CHANNELS] = {
	{
		.name = "SDIO_AL_WLAN_CH0", /* HTT */
		.client_data = NULL, /* populate from client handle */
		.ul_xfer_cb = ul_xfer_cb,
		.dl_xfer_cb = dl_xfer_cb,
		.dl_data_avail_cb = dl_data_avail_cb,
		.dl_meta_data_cb = NULL
	},
	{
		.name = "SDIO_AL_WLAN_CH1", /* WMI */
		.client_data = NULL, /* populate from client handle */
		.ul_xfer_cb = ul_xfer_cb,
		.dl_xfer_cb = dl_xfer_cb,
		.dl_data_avail_cb = dl_data_avail_cb,
		.dl_meta_data_cb = NULL
	}
};
483 
484 /**
485  * hif_dev_register_channels()- Register transport layer channels
486  * @dev  : HIF device object
487  * @func : SDIO function pointer
488  *
489  * Return : success on configuration, else failure
490  */
491 int hif_dev_register_channels(struct hif_sdio_dev *dev, struct sdio_func *func)
492 {
493 	int ret = 0;
494 	unsigned int chan;
495 	struct sdio_al_channel_data *chan_data[HIF_ADMA_MAX_CHANS];
496 
497 	HIF_ENTER();
498 
499 	dev->al_client = pld_sdio_get_sdio_al_client_handle(func);
500 	if (ret || !dev->al_client) {
501 		hif_err("Failed to get get sdio al handle");
502 		return ret;
503 	}
504 
505 	if ((func->device & MANUFACTURER_ID_AR6K_BASE_MASK) ==
506 	    MANUFACTURER_ID_QCN7605_BASE) {
507 		dev->adma_chans_used = 2;
508 		qcn7605_chan[0].client_data = dev->al_client->client_data;
509 		qcn7605_chan[1].client_data = dev->al_client->client_data;
510 		chan_data[0] = &qcn7605_chan[0];
511 		chan_data[1] = &qcn7605_chan[1];
512 	} else {
513 		dev->adma_chans_used = 0;
514 	}
515 
516 	for (chan = 0; chan < dev->adma_chans_used; chan++) {
517 		dev->al_chan[chan] =
518 		pld_sdio_register_sdio_al_channel(dev->al_client,
519 						  chan_data[chan]);
520 		if (!dev->al_chan[chan] || IS_ERR(dev->al_chan[chan])) {
521 			ret = -EINVAL;
522 			hif_err("Channel registration failed");
523 		} else {
524 			dev->al_chan[chan]->priv = (void *)dev;
525 			hif_info("chan %s : id : %u",
526 				 chan_data[chan]->name,
527 				 dev->al_chan[chan]->channel_id);
528 		}
529 	}
530 
531 	HIF_EXIT();
532 
533 	return ret;
534 }
535 
536 /**
537  * hif_dev_unregister_channels()- Register transport layer channels
538  * @dev  : HIF device object
539  * @func : SDIO Function pointer
540  *
541  * Return : None
542  */
543 void hif_dev_unregister_channels(struct hif_sdio_dev *dev,
544 				 struct sdio_func *func)
545 {
546 	unsigned int chan;
547 
548 	if (!dev) {
549 		hif_err("hif_sdio_dev is null");
550 		return;
551 	}
552 
553 	for (chan = 0; chan < dev->adma_chans_used; chan++) {
554 		dev->al_chan[chan]->priv = NULL;
555 		pld_sdio_unregister_sdio_al_channel(dev->al_chan[chan]);
556 	}
557 }
558 
559 /**
560  * hif_read_write() - queue a read/write request
561  * @dev: pointer to hif device structure
562  * @address: address to read, actually channel pointer
563  * @buffer: buffer to hold read/write data
564  * @length: length to read/write
565  * @request: read/write/sync/async request
566  * @context: pointer to hold calling context
567  *
568  * Return: 0, pending  on success, error number otherwise.
569  */
570 QDF_STATUS
571 hif_read_write(struct hif_sdio_dev *dev,
572 	       unsigned long sdio_al_ch_handle,
573 	       char *cbuffer, uint32_t length,
574 	       uint32_t request, void *context)
575 {
576 	QDF_STATUS status = QDF_STATUS_SUCCESS;
577 	struct sdio_al_channel_handle *ch;
578 	struct bus_request *bus_req;
579 	enum sdio_al_dma_direction dir;
580 	struct hif_sdio_device *device;
581 	QDF_STATUS (*rx_comp)(void *, qdf_nbuf_t, uint8_t);
582 	qdf_nbuf_t nbuf;
583 	int ret = 0, payload_len = 0;
584 	unsigned char *buffer = (unsigned char *)cbuffer;
585 
586 	if (!dev || !sdio_al_ch_handle) {
587 		hif_err("Device = %pK, addr = %lu", dev, sdio_al_ch_handle);
588 		return QDF_STATUS_E_INVAL;
589 	}
590 
591 	if (!(request & HIF_ASYNCHRONOUS) &&
592 	    !(request & HIF_SYNCHRONOUS)) {
593 		hif_err("Invalid request mode: %d", request);
594 		return QDF_STATUS_E_INVAL;
595 	}
596 
597 	/*sdio r/w action is not needed when suspend, so just return */
598 	if ((dev->is_suspend) &&
599 	    (dev->power_config == HIF_DEVICE_POWER_CUT)) {
600 		hif_info("skip in suspend");
601 		return QDF_STATUS_SUCCESS;
602 	}
603 
604 	ch = (struct sdio_al_channel_handle *)sdio_al_ch_handle;
605 
606 	bus_req = hif_allocate_bus_request(dev);
607 	if (!bus_req) {
608 		hif_err("Bus alloc failed");
609 		return QDF_STATUS_E_FAILURE;
610 	}
611 
612 	bus_req->address = sdio_al_ch_handle;
613 	bus_req->length = length;
614 	bus_req->request = request;
615 	bus_req->context = context;
616 	bus_req->buffer = buffer;
617 
618 	/* Request SDIO AL to do transfer */
619 	dir = (request & HIF_SDIO_WRITE) ? SDIO_AL_TX : SDIO_AL_RX;
620 
621 	if (request & HIF_SYNCHRONOUS) {
622 		ret = sdio_al_queue_transfer(ch,
623 					     dir,
624 					     bus_req->buffer,
625 					     bus_req->length,
626 					     1); /* higher priority */
627 		if (ret) {
628 			status = QDF_STATUS_E_FAILURE;
629 			hif_err("SYNC REQ failed ret: %d", ret);
630 		} else {
631 			status = QDF_STATUS_SUCCESS;
632 		}
633 
634 		hif_free_bus_request(dev, bus_req);
635 
636 		if ((status == QDF_STATUS_SUCCESS) && (dir == SDIO_AL_RX)) {
637 			nbuf = (qdf_nbuf_t)context;
638 			payload_len = HTC_GET_FIELD(bus_req->buffer,
639 						    HTC_FRAME_HDR,
640 						    PAYLOADLEN);
641 			qdf_nbuf_set_pktlen(nbuf, payload_len + HTC_HDR_LENGTH);
642 			device = (struct hif_sdio_device *)dev->htc_context;
643 			rx_comp = device->hif_callbacks.rxCompletionHandler;
644 			rx_comp(device->hif_callbacks.Context, nbuf, 0);
645 		}
646 	} else {
647 		ret = sdio_al_queue_transfer_async(ch,
648 						   dir,
649 						   bus_req->buffer,
650 						   bus_req->length,
651 						   1, /* higher priority */
652 						   (void *)bus_req);
653 		if (ret) {
654 			status = QDF_STATUS_E_FAILURE;
655 			hif_err("ASYNC REQ fail ret: %d for len: %d ch: %d",
656 				ret, length, ch->channel_id);
657 			hif_free_bus_request(dev, bus_req);
658 		} else {
659 			status = QDF_STATUS_E_PENDING;
660 		}
661 	}
662 	return status;
663 }
664 
665 /**
666  * ul_xfer_cb() - Completion call back for asynchronous transfer
667  * @ch_handle: The sdio al channel handle
668  * @result: The result of the operation
669  * @context: pointer to request context
670  *
671  * Return: None
672  */
673 void ul_xfer_cb(struct sdio_al_channel_handle *ch_handle,
674 		struct sdio_al_xfer_result *result,
675 		void *ctx)
676 {
677 	struct bus_request *req = (struct bus_request *)ctx;
678 	struct hif_sdio_dev *dev;
679 
680 	if (!ch_handle || !result) {
681 		hif_err("Invalid args");
682 		qdf_assert_always(0);
683 		return;
684 	}
685 
686 	dev = (struct hif_sdio_dev *)ch_handle->priv;
687 
688 	if (result->xfer_status) {
689 		req->status = QDF_STATUS_E_FAILURE;
690 		hif_err("ASYNC Tx failed status: %d", result->xfer_status);
691 	} else {
692 		req->status = QDF_STATUS_SUCCESS;
693 	}
694 
695 	dev->htc_callbacks.rw_compl_handler(req->context, req->status);
696 
697 	hif_free_bus_request(dev, req);
698 }
699 
700 /**
701  * dl_data_avail_cb() - Called when data is available on a channel
702  * @ch_handle: The sdio al channel handle
703  * @len: The len of data available to download
704  *
705  * Return: None
706  */
707 /* Use the asynchronous method of transfer. This will help in
708  * completing READ in the transfer done callback later which
709  * runs in sdio al thread context. If we do the syncronous
710  * transfer here, the thread context won't be available and
711  * perhaps a new thread may be required here.
712  */
713 void dl_data_avail_cb(struct sdio_al_channel_handle *ch_handle,
714 		      unsigned int len)
715 {
716 	struct hif_sdio_dev *dev;
717 	unsigned int chan;
718 	qdf_nbuf_t nbuf;
719 
720 	if (!ch_handle || !len) {
721 		hif_err("Invalid args %u", len);
722 		qdf_assert_always(0);
723 		return;
724 	}
725 
726 	dev = (struct hif_sdio_dev *)ch_handle->priv;
727 	chan = ch_handle->channel_id;
728 
729 	if (chan > HIF_SDIO_MAX_AL_CHANNELS) {
730 		hif_err("Invalid Ch ID %d", chan);
731 		return;
732 	}
733 
734 	/* allocate a buffer for reading the data from the chip.
735 	 * Note that this is raw, unparsed buffer and will be
736 	 * processed in the transfer done callback.
737 	 */
738 	/* TODO, use global buffer instead of runtime allocations */
739 	nbuf = qdf_nbuf_alloc(NULL, len, 0, 4, false);
740 
741 	if (!nbuf) {
742 		hif_err("Unable to alloc netbuf %u bytes", len);
743 		return;
744 	}
745 
746 	hif_read_write(dev, (unsigned long)ch_handle, nbuf->data, len,
747 		       HIF_RD_ASYNC_BLOCK_FIX, nbuf);
748 }
749 
/* A padding marker at the head of the remaining buffer ends parsing */
#define is_pad_block(buf)	(*((uint32_t *)buf) == 0xbabababa)
/* Debug watermark: largest HTC rx payload seen so far */
uint16_t g_dbg_payload_len;

/**
 * dl_xfer_cb() - Call from lower layer after transfer is completed
 * @ch_handle: The sdio al channel handle
 * @result: The xfer result
 * @ctx: Context passed in the transfer queuing (struct bus_request)
 *
 * Walks the raw transfer buffer, copies each HTC frame into its own
 * nbuf and delivers it to the HTC rx completion handler, then frees
 * the raw rx nbuf and the bus request.
 *
 * Return: None
 */
void dl_xfer_cb(struct sdio_al_channel_handle *ch_handle,
		struct sdio_al_xfer_result *result,
		void *ctx)
{
	unsigned char *buf;
	qdf_nbuf_t nbuf;
	uint32_t len;
	uint16_t payload_len = 0;
	struct hif_sdio_dev *dev;
	struct hif_sdio_device *device;
	struct bus_request *bus_req = (struct bus_request *)ctx;
	QDF_STATUS (*rx_completion)(void *, qdf_nbuf_t, uint8_t);

	if (!bus_req) {
		hif_err("Bus Req NULL!!!");
		qdf_assert_always(0);
		return;
	}

	if (!ch_handle || !result) {
		hif_err("Invalid args %pK %pK", ch_handle, result);
		qdf_assert_always(0);
		return;
	}

	dev = (struct hif_sdio_dev *)ch_handle->priv;
	if (result->xfer_status) {
		hif_err("ASYNC Rx failed %d", result->xfer_status);
		qdf_nbuf_free((qdf_nbuf_t)bus_req->context);
		hif_free_bus_request(dev, bus_req);
		return;
	}

	device = (struct hif_sdio_device *)dev->htc_context;
	rx_completion = device->hif_callbacks.rxCompletionHandler;

	buf = (unsigned char *)result->buf_addr;
	len = (unsigned int)result->xfer_len;

	/* Parse HTC frames back-to-back until the padding marker or the
	 * residue is smaller than an HTC header.
	 */
	while (len >= sizeof(HTC_FRAME_HDR)) {
		if (is_pad_block(buf)) {
			/* End of Rx Buffer */
			break;
		}

		if (HTC_GET_FIELD(buf, HTC_FRAME_HDR, ENDPOINTID) >=
		    ENDPOINT_MAX) {
			hif_err("Invalid endpoint id: %u",
				HTC_GET_FIELD(buf, HTC_FRAME_HDR, ENDPOINTID));
			break;
		}

		/* Copy the HTC frame to the alloc'd packet buffer */
		payload_len = HTC_GET_FIELD(buf, HTC_FRAME_HDR, PAYLOADLEN);
		payload_len = qdf_le16_to_cpu(payload_len);
		if (!payload_len) {
			hif_err("Invalid Payload len %d bytes", payload_len);
			break;
		}
		if (payload_len > g_dbg_payload_len) {
			g_dbg_payload_len = payload_len;
			hif_err("Max Rx HTC Payload = %d", g_dbg_payload_len);
		}

		/* NOTE(review): HTC_HEADER_LEN is used for alloc/copy below
		 * while HTC_HDR_LENGTH is used to advance -- presumably both
		 * equal the HTC header size (the log string hardcodes 8);
		 * confirm they match.
		 */
		nbuf = hif_sdio_get_nbuf(dev, payload_len + HTC_HEADER_LEN);
		if (!nbuf) {
			hif_err("Failed to alloc rx buffer");
			break;
		}

		/* Check if payload fits in skb */
		if (qdf_nbuf_tailroom(nbuf) < payload_len + HTC_HEADER_LEN) {
			hif_err("Payload + HTC_HDR %d > skb tailroom %d",
				(payload_len + 8),
				qdf_nbuf_tailroom(nbuf));
			qdf_nbuf_free(nbuf);
			break;
		}

		qdf_mem_copy((uint8_t *)qdf_nbuf_data(nbuf), buf,
			     payload_len + HTC_HEADER_LEN);

		qdf_nbuf_put_tail(nbuf, payload_len + HTC_HDR_LENGTH);

		rx_completion(device->hif_callbacks.Context, nbuf,
			      0); /* don't care, not used */

		len -= payload_len + HTC_HDR_LENGTH;
		buf += payload_len + HTC_HDR_LENGTH;
	}

	/* Raw transfer buffer is fully consumed; release it and the request */
	qdf_nbuf_free((qdf_nbuf_t)bus_req->context);
	hif_free_bus_request(dev, bus_req);
}
855