/*
 * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#include "targcfg.h"
#include "qdf_lock.h"
#include "qdf_status.h"
#include <qdf_atomic.h>         /* qdf_atomic_read */
#include <targaddrs.h>
#include "hif_io32.h"
#include <hif.h>
#include "regtable.h"
#define ATH_MODULE_NAME hif
#include <a_debug.h>
#include "hif_main.h"
#include "ce_api.h"
#include "ce_bmi.h"
#include "qdf_trace.h"
#include "hif_debug.h"
#include "bmi_msg.h"
#include "qdf_module.h"

/* Track a BMI transaction that is in progress */
#ifndef BIT
#define BIT(n) (1 << (n))
#endif

enum {
	BMI_REQ_SEND_DONE = BIT(0),   /* BMI request send completed */
	BMI_RESP_RECV_DONE = BIT(1),  /* BMI response has been received */
};

struct BMI_transaction {
	struct HIF_CE_state *hif_state;
	qdf_semaphore_t bmi_transaction_sem;
	uint8_t *bmi_request_host;        /* Req BMI msg in Host addr space */
	qdf_dma_addr_t bmi_request_CE;    /* Req BMI msg in CE addr space */
	uint32_t bmi_request_length;      /* Length of BMI request */
	uint8_t *bmi_response_host;       /* Rsp BMI msg in Host addr space */
	qdf_dma_addr_t bmi_response_CE;   /* Rsp BMI msg in CE addr space */
	unsigned int bmi_response_length; /* Length of received response */
	unsigned int bmi_timeout_ms;
	uint32_t bmi_transaction_flags;   /* flags for the transaction */
};

/*
 * send/recv completion functions for BMI.
 * NB: The "net_buf" parameter is actually just a
 * straight buffer, not an sk_buff.
 */
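/**
 * hif_bmi_send_done() - CE send completion callback for the BMI request
 * @copyeng: copy engine handle the request was sent on
 * @ce_context: CE context registered with the callback
 * @transfer_context: the struct BMI_transaction for this exchange
 * @data: CE address of the request buffer
 * @nbytes: number of bytes sent
 * @transfer_id: transaction id passed to ce_send()
 * @sw_index: CE software index
 * @hw_index: CE hardware index
 * @toeplitz_hash_result: toeplitz hash of the sent buffer
 *
 * Marks the request as sent and releases bmi_transaction_sem once no
 * response is expected or the response has already been received. In
 * BMI_RSP_POLLING builds the semaphore is released unconditionally
 * after the send completes.
 */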
void hif_bmi_send_done(struct CE_handle *copyeng, void *ce_context,
		  void *transfer_context, qdf_dma_addr_t data,
		  unsigned int nbytes,
		  unsigned int transfer_id, unsigned int sw_index,
		  unsigned int hw_index, uint32_t toeplitz_hash_result)
{
	struct BMI_transaction *transaction =
		(struct BMI_transaction *)transfer_context;

#ifdef BMI_RSP_POLLING
	/*
	 * Fix EV118783: release the semaphore after sending,
	 * regardless of whether a response is expected.
	 */
	qdf_semaphore_release(&transaction->bmi_transaction_sem);
#else
	/*
	 * If a response is anticipated, we'll complete the
	 * transaction if the response has been received.
	 * If no response is anticipated, complete the
	 * transaction now.
	 */
	transaction->bmi_transaction_flags |= BMI_REQ_SEND_DONE;

	/* resp isn't needed or has already been received;
	 * never assume the resp comes later than this
	 */
	if (!transaction->bmi_response_CE ||
	    (transaction->bmi_transaction_flags & BMI_RESP_RECV_DONE)) {
		qdf_semaphore_release(&transaction->bmi_transaction_sem);
	}
#endif
}

#ifndef BMI_RSP_POLLING
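/**
 * hif_bmi_recv_data() - CE recv completion callback for the BMI response
 * @copyeng: copy engine handle the response arrived on
 * @ce_context: CE context registered with the callback
 * @transfer_context: the struct BMI_transaction for this exchange
 * @data: CE address of the response buffer
 * @nbytes: number of response bytes received
 * @transfer_id: transaction id of the completed transfer
 * @flags: CE completion flags
 *
 * Records the response length, marks the response as received and
 * releases bmi_transaction_sem once the request send has also completed.
 */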
void hif_bmi_recv_data(struct CE_handle *copyeng, void *ce_context,
		  void *transfer_context, qdf_dma_addr_t data,
		  unsigned int nbytes,
		  unsigned int transfer_id, unsigned int flags)
{
	struct BMI_transaction *transaction =
		(struct BMI_transaction *)transfer_context;

	transaction->bmi_response_length = nbytes;
	transaction->bmi_transaction_flags |= BMI_RESP_RECV_DONE;

	/* when both send/recv are done, the sem can be released */
	if (transaction->bmi_transaction_flags & BMI_REQ_SEND_DONE)
		qdf_semaphore_release(&transaction->bmi_transaction_sem);
}
#endif

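/**
 * hif_exchange_bmi_msg() - send a BMI request and optionally wait for
 *	the BMI response
 * @hif_ctx: hif opaque context
 * @bmi_cmd_da: CE-addressable (DMA) address of the request buffer
 * @bmi_rsp_da: CE-addressable (DMA) address of the response buffer
 * @bmi_request: host virtual address of the request
 * @request_length: length of the request in bytes
 * @bmi_response: host virtual address of the response buffer, or NULL
 *	if no response is expected
 * @bmi_response_lengthp: set to the number of response bytes received
 * @TimeoutMS: transaction timeout in milliseconds (currently recorded
 *	in the transaction but not yet enforced, see the TBD below)
 *
 * The request is sent on the BMI_CE_NUM_TO_TARG copy engine and, when a
 * response buffer is supplied, a receive buffer is posted on
 * BMI_CE_NUM_TO_HOST. The call blocks on bmi_transaction_sem until the
 * CE completion callbacks release it; with BMI_RSP_POLLING the response
 * is then polled via ce_completed_recv_next().
 *
 * Return: QDF_STATUS_SUCCESS on success, an error code otherwise.
 */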
QDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
				qdf_dma_addr_t bmi_cmd_da,
				qdf_dma_addr_t bmi_rsp_da,
				uint8_t *bmi_request,
				uint32_t request_length,
				uint8_t *bmi_response,
				uint32_t *bmi_response_lengthp,
				uint32_t TimeoutMS)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct HIF_CE_pipe_info *send_pipe_info =
		&(hif_state->pipe_info[BMI_CE_NUM_TO_TARG]);
	struct CE_handle *ce_send_hdl = send_pipe_info->ce_hdl;
	qdf_dma_addr_t CE_request, CE_response = 0;
	struct BMI_transaction *transaction = NULL;
	int status = QDF_STATUS_SUCCESS;
	struct HIF_CE_pipe_info *recv_pipe_info =
		&(hif_state->pipe_info[BMI_CE_NUM_TO_HOST]);
	struct CE_handle *ce_recv = recv_pipe_info->ce_hdl;
	unsigned int mux_id = 0;
	unsigned int transaction_id = 0xffff;
	unsigned int user_flags = 0;
#ifdef BMI_RSP_POLLING
	qdf_dma_addr_t buf;
	unsigned int completed_nbytes, id, flags;
	int i;
#endif

	transaction =
		(struct BMI_transaction *)qdf_mem_malloc(sizeof(*transaction));
	if (unlikely(!transaction)) {
		HIF_ERROR("%s: no memory", __func__);
		return QDF_STATUS_E_NOMEM;
	}
	transaction_id = (mux_id & MUX_ID_MASK) |
		(transaction_id & TRANSACTION_ID_MASK);
#ifdef QCA_WIFI_3_0
	user_flags &= DESC_DATA_FLAG_MASK;
#endif
	A_TARGET_ACCESS_LIKELY(scn);

	/* Initialize bmi_transaction_sem to block */
	qdf_semaphore_init(&transaction->bmi_transaction_sem);
	qdf_semaphore_acquire(&transaction->bmi_transaction_sem);

	transaction->hif_state = hif_state;
	transaction->bmi_request_host = bmi_request;
	transaction->bmi_request_length = request_length;
	transaction->bmi_response_length = 0;
	transaction->bmi_timeout_ms = TimeoutMS;
	transaction->bmi_transaction_flags = 0;

	/*
	 * CE_request = dma_map_single(dev,
	 * (void *)bmi_request, request_length, DMA_TO_DEVICE);
	 */
	CE_request = bmi_cmd_da;
	transaction->bmi_request_CE = CE_request;

	if (bmi_response) {

		/*
		 * CE_response = dma_map_single(dev, bmi_response,
		 * BMI_DATASZ_MAX, DMA_FROM_DEVICE);
		 */
		CE_response = bmi_rsp_da;
		transaction->bmi_response_host = bmi_response;
		transaction->bmi_response_CE = CE_response;
		/* dma_cache_sync(dev, bmi_response,
		 *      BMI_DATASZ_MAX, DMA_FROM_DEVICE);
		 */
		qdf_mem_dma_sync_single_for_device(scn->qdf_dev,
					       CE_response,
					       BMI_DATASZ_MAX,
					       DMA_FROM_DEVICE);
		ce_recv_buf_enqueue(ce_recv, transaction,
				    transaction->bmi_response_CE);
		/* NB: see hif_bmi_recv_data */
	} else {
		transaction->bmi_response_host = NULL;
		transaction->bmi_response_CE = 0;
	}

	/* dma_cache_sync(dev, bmi_request, request_length, DMA_TO_DEVICE); */
	qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_request,
				       request_length, DMA_TO_DEVICE);

	status =
		ce_send(ce_send_hdl, transaction,
			CE_request, request_length,
			transaction_id, 0, user_flags);
	ASSERT(status == QDF_STATUS_SUCCESS);
	/* NB: see hif_bmi_send_done */

	/* TBDXXX: handle timeout */

	/* Wait for BMI request/response transaction to complete */
	/* Always just wait for BMI request here if
	 * BMI_RSP_POLLING is defined
	 */
	while (qdf_semaphore_acquire(&transaction->bmi_transaction_sem)) {
		/* need some break-out condition (timeout?) */
	}

	if (bmi_response) {
#ifdef BMI_RSP_POLLING
		/* Fix EV118783: do not wait on a semaphore for the BMI
		 * response since the corresponding interrupt may be lost;
		 * poll for the BMI response instead.
		 */
		i = 0;
		while (ce_completed_recv_next(
			    ce_recv, NULL, NULL, &buf,
			    &completed_nbytes, &id,
			    &flags) != QDF_STATUS_SUCCESS) {
			if (i++ > BMI_RSP_TO_MILLISEC) {
				HIF_ERROR("%s:error, can't get bmi response",
					__func__);
				status = QDF_STATUS_E_BUSY;
				break;
			}
			OS_DELAY(1000);
		}

		if ((status == QDF_STATUS_SUCCESS) && bmi_response_lengthp)
			*bmi_response_lengthp = completed_nbytes;
#else
		if ((status == QDF_STATUS_SUCCESS) && bmi_response_lengthp) {
			*bmi_response_lengthp =
				transaction->bmi_response_length;
		}
#endif

	}

	/* dma_unmap_single(dev, transaction->bmi_request_CE,
	 *     request_length, DMA_TO_DEVICE);
	 * bus_unmap_single(scn->sc_osdev,
	 *     transaction->bmi_request_CE,
	 *     request_length, BUS_DMA_TODEVICE);
	 */

	if (status != QDF_STATUS_SUCCESS) {
		qdf_dma_addr_t unused_buffer;
		unsigned int unused_nbytes;
		unsigned int unused_id;
		unsigned int toeplitz_hash_result;

		ce_cancel_send_next(ce_send_hdl,
			NULL, NULL, &unused_buffer,
			&unused_nbytes, &unused_id,
			&toeplitz_hash_result);
	}

	A_TARGET_ACCESS_UNLIKELY(scn);
	qdf_mem_free(transaction);
	return status;
}
qdf_export_symbol(hif_exchange_bmi_msg);

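/*
 * When BMI_RSP_POLLING is defined the response is polled directly in
 * hif_exchange_bmi_msg(), so no recv callback is registered for the
 * BMI response copy engine.
 */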
#ifdef BMI_RSP_POLLING
#define BMI_RSP_CB_REGISTER 0
#else
#define BMI_RSP_CB_REGISTER 1
#endif

/**
 * hif_register_bmi_callbacks() - register bmi callbacks
 * @hif_sc: hif context
 *
 * The BMI phase uses different copy-complete callbacks than mission mode.
 */
void hif_register_bmi_callbacks(struct hif_softc *hif_sc)
{
	struct HIF_CE_pipe_info *pipe_info;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);

	/*
	 * Initially, establish CE completion handlers for use with BMI.
	 * These are overwritten with generic handlers after we exit BMI phase.
	 */
	pipe_info = &hif_state->pipe_info[BMI_CE_NUM_TO_TARG];
	ce_send_cb_register(pipe_info->ce_hdl, hif_bmi_send_done, pipe_info, 0);

	if (BMI_RSP_CB_REGISTER) {
		pipe_info = &hif_state->pipe_info[BMI_CE_NUM_TO_HOST];
		ce_recv_cb_register(
			pipe_info->ce_hdl, hif_bmi_recv_data, pipe_info, 0);
	}
}
319