/*
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <htt.h>
#include "dp_types.h"
#include "dp_internal.h"
#include "dp_rh_htt.h"
#include "dp_rh_rx.h"
#include "qdf_mem.h"
#include "cdp_txrx_cmn_struct.h"
#include "dp_tx_desc.h"
#include "dp_rh.h"

#define HTT_MSG_BUF_SIZE(msg_bytes) \
	((msg_bytes) + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING)

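/*
 * HTT_T2H_MSG_BUF_REINIT() - Restore a T2H indication buffer for reuse:
 * push the data pointer back over the HTC header and alignment padding,
 * re-initialize the nbuf (resetting its user count) and sync the buffer
 * back to the device for DMA so it can be reposted to the ring.
 */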
#define HTT_T2H_MSG_BUF_REINIT(_buf, dev)				\
	do {								\
		qdf_nbuf_push_head(_buf, (HTC_HEADER_LEN) +		\
				   HTC_HDR_ALIGNMENT_PADDING);		\
		qdf_nbuf_init_fast((_buf));				\
		qdf_mem_dma_sync_single_for_device(dev,			\
					(QDF_NBUF_CB_PADDR(_buf)),	\
					(skb_end_pointer(_buf) -	\
					(_buf)->data),			\
					PCI_DMA_FROMDEVICE);		\
	} while (0)

/**
 * dp_htt_flow_pool_map_handler_rh() - HTT_T2H_MSG_TYPE_FLOW_POOL_MAP handler
 * @soc: Handle to DP Soc structure
 * @flow_id: flow id
 * @flow_type: flow type
 * @flow_pool_id: pool id
 * @flow_pool_size: pool size
 *
 * Return: QDF_STATUS_SUCCESS - success, others - failure
 */
static QDF_STATUS
dp_htt_flow_pool_map_handler_rh(struct dp_soc *soc, uint8_t flow_id,
				uint8_t flow_type, uint8_t flow_pool_id,
				uint32_t flow_pool_size)
{
	struct dp_vdev *vdev;
	struct dp_pdev *pdev;
	QDF_STATUS status;

	if (flow_pool_id >= MAX_TXDESC_POOLS) {
		dp_err("invalid flow_pool_id %d", flow_pool_id);
		return QDF_STATUS_E_INVAL;
	}

	vdev = dp_vdev_get_ref_by_id(soc, flow_id, DP_MOD_ID_HTT);
	if (vdev) {
		pdev = vdev->pdev;
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT);
	} else {
		pdev = soc->pdev_list[0];
	}

	status = dp_tx_flow_pool_map_handler(pdev, flow_id, flow_type,
					     flow_pool_id, flow_pool_size);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to create tx flow pool %d", flow_pool_id);
		goto err_out;
	}

	return QDF_STATUS_SUCCESS;

err_out:
	/* TODO: is an assert needed here? */
	qdf_assert_always(0);
	return status;
}

/**
 * dp_htt_flow_pool_unmap_handler_rh() - HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP handler
 * @soc: Handle to DP Soc structure
 * @flow_id: flow id
 * @flow_type: flow type
 * @flow_pool_id: pool id
 *
 * Return: none
 */
static void
dp_htt_flow_pool_unmap_handler_rh(struct dp_soc *soc, uint8_t flow_id,
				  uint8_t flow_type, uint8_t flow_pool_id)
{
	struct dp_vdev *vdev;
	struct dp_pdev *pdev;

	if (flow_pool_id >= MAX_TXDESC_POOLS) {
		dp_err("invalid flow_pool_id %d", flow_pool_id);
		return;
	}

	vdev = dp_vdev_get_ref_by_id(soc, flow_id, DP_MOD_ID_HTT);
	if (vdev) {
		pdev = vdev->pdev;
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT);
	} else {
		pdev = soc->pdev_list[0];
	}

	dp_tx_flow_pool_unmap_handler(pdev, flow_id, flow_type,
				      flow_pool_id);
}

/**
 * dp_htt_h2t_send_complete_free_netbuf() - Free completed buffer
 * @soc:	SOC handle
 * @status:	Completion status
 * @netbuf:	HTT buffer
 *
 * Return: none
 */
static void
dp_htt_h2t_send_complete_free_netbuf(
	void *soc, A_STATUS status, qdf_nbuf_t netbuf)
{
	qdf_nbuf_free(netbuf);
}

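/**
 * dp_htt_h2t_rx_ring_rfs_cfg() - Send the HTT RFS (receive flow steering)
 * config message to the FW
 * @soc: Handle to HTT SOC structure
 *
 * Return: QDF_STATUS_SUCCESS on success, error status on failure
 */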
QDF_STATUS dp_htt_h2t_rx_ring_rfs_cfg(struct htt_soc *soc)
{
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	uint32_t *msg_word;
	QDF_STATUS status;
	uint8_t *htt_logger_bufp;

	/*
	 * TODO: check whether ini support is needed in Evros.
	 * This configures receive flow steering; disable it with
	 * gEnableFlowSteering(=0) in the ini if the FW does not support it.
	 */

	/* reserve room for the HTC header */
	msg = qdf_nbuf_alloc(soc->osdev,
			     HTT_MSG_BUF_SIZE(HTT_RFS_CFG_REQ_BYTES),
			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4,
			     true);
	if (!msg) {
		dp_err("htt_msg alloc failed for RFS config");
		return QDF_STATUS_E_NOMEM;
	}
	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	qdf_nbuf_put_tail(msg, HTT_RFS_CFG_REQ_BYTES);

	/* fill in the message contents */
	msg_word = (uint32_t *)qdf_nbuf_data(msg);

	/* rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);

	/* word 0 */
	*msg_word = 0;
	htt_logger_bufp = (uint8_t *)msg_word;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RFS_CONFIG);
	HTT_RX_RFS_CONFIG_SET(*msg_word, 1);

	/*
	 * Set the max MSDUs per RX indication (hardcoded to 32 for now).
	 * TODO: this value should be obtained from the maxMSDUsPerRxInd ini;
	 * currently that ini is legacy OL and available only from CDS.
	 * Make the ini common to HL and Evros DP.
	 */
	*msg_word |= ((32 & 0xff) << 16);

	dp_htt_info("RFS sent to FW: 0x%08x", *msg_word);

	/* allocate an HTC packet and hand the message over to HTC */
	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */
	SET_HTC_PACKET_INFO_TX(
		&pkt->htc_pkt,
		dp_htt_h2t_send_complete_free_netbuf,
		qdf_nbuf_data(msg),
		qdf_nbuf_len(msg),
		soc->htc_endpoint,
		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RFS_CONFIG,
				     htt_logger_bufp);

	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}

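/**
 * dp_htt_rx_addba_handler_rh() - RX ADDBA handler
 * @soc: Handle to DP Soc structure
 * @peer_id: peer id
 * @tid: TID
 * @win_sz: BlockAck window size
 *
 * Currently a no-op stub.
 *
 * Return: none
 */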
static void
dp_htt_rx_addba_handler_rh(struct dp_soc *soc, uint16_t peer_id,
			   uint8_t tid, uint16_t win_sz)
{
}

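/**
 * dp_htt_rx_delba_ind_handler_rh() - RX DELBA indication handler
 * @soc_handle: Handle to DP Soc structure
 * @peer_id: peer id
 * @tid: TID
 * @win_sz: BlockAck window size
 *
 * Currently a stub that only reports success.
 *
 * Return: QDF_STATUS_SUCCESS
 */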
static QDF_STATUS
dp_htt_rx_delba_ind_handler_rh(void *soc_handle, uint16_t peer_id,
			       uint8_t tid, uint16_t win_sz)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_htt_t2h_msg_handler_fast() - Fastpath specific message handler
 * @context: HTT context
 * @cmpl_msdus: netbuf completions
 * @num_cmpls: number of completions to be handled
 *
 * Return: None
 */
static void
dp_htt_t2h_msg_handler_fast(void *context, qdf_nbuf_t *cmpl_msdus,
			    uint32_t num_cmpls)
{
	struct htt_soc *soc = (struct htt_soc *)context;
	qdf_nbuf_t htt_t2h_msg;
	uint32_t *msg_word;
	uint32_t i;
	enum htt_t2h_msg_type msg_type;
	uint32_t msg_len;

	for (i = 0; i < num_cmpls; i++) {
		htt_t2h_msg = cmpl_msdus[i];
		msg_len = qdf_nbuf_len(htt_t2h_msg);

		/*
		 * Move the data pointer to point to HTT header
		 * past the HTC header + HTC header alignment padding
		 */
		qdf_nbuf_pull_head(htt_t2h_msg, HTC_HEADER_LEN +
				   HTC_HDR_ALIGNMENT_PADDING);

		msg_word = (uint32_t *)qdf_nbuf_data(htt_t2h_msg);
		msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
		switch (msg_type) {
		case HTT_T2H_MSG_TYPE_RX_DATA_IND:
		{
			uint16_t vdev_id, msdu_cnt;
			uint16_t peer_id, frag_ind;

			peer_id = HTT_RX_DATA_IND_PEER_ID_GET(*msg_word);
			frag_ind = HTT_RX_DATA_IND_FRAG_GET(*(msg_word + 1));
			vdev_id = HTT_RX_DATA_IND_VDEV_ID_GET(*msg_word);

			if (qdf_unlikely(frag_ind)) {
				dp_rx_frag_indication_handler(soc->dp_soc,
							      htt_t2h_msg,
							      vdev_id, peer_id);
				break;
			}

			msdu_cnt =
				HTT_RX_DATA_IND_MSDU_CNT_GET(*(msg_word + 1));
			dp_rx_data_indication_handler(soc->dp_soc, htt_t2h_msg,
						      vdev_id, peer_id,
						      msdu_cnt);
			break;
		}
		case HTT_T2H_MSG_TYPE_SOFT_UMAC_TX_COMPL_IND:
		{
			uint32_t num_msdus;

			num_msdus = HTT_SOFT_UMAC_TX_COMP_IND_MSDU_COUNT_GET(*msg_word);

			if ((num_msdus * HTT_TX_MSDU_INFO_SIZE +
			     HTT_SOFT_UMAC_TX_COMPL_IND_SIZE) > msg_len) {
				dp_htt_err("Invalid msdu count in tx compl indication %d", num_msdus);
				break;
			}

			dp_tx_compl_handler_rh(soc->dp_soc, htt_t2h_msg);
			break;
		}
		case HTT_T2H_MSG_TYPE_RX_PN_IND:
		{
			/* TODO: check and add PN IND handling */
			break;
		}
		case HTT_T2H_MSG_TYPE_RX_ADDBA:
		{
			uint16_t peer_id;
			uint8_t tid;
			uint16_t win_sz;

			/*
			 * Update the REO queue descriptor with the new values
			 */
			peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
			tid = HTT_RX_ADDBA_TID_GET(*msg_word);
			win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);

			/*
			 * The window size needs to be incremented by 1
			 * since the FW needs to represent a value of 256
			 * using just 8 bits.
			 */
			dp_htt_rx_addba_handler_rh(soc->dp_soc, peer_id,
						   tid, win_sz + 1);
			break;
		}
		case HTT_T2H_MSG_TYPE_RX_DELBA:
		{
			uint16_t peer_id;
			uint8_t tid;
			uint8_t win_sz;
			QDF_STATUS status;

			peer_id = HTT_RX_DELBA_PEER_ID_GET(*msg_word);
			tid = HTT_RX_DELBA_TID_GET(*msg_word);
			win_sz = HTT_RX_DELBA_WIN_SIZE_GET(*msg_word);

			status = dp_htt_rx_delba_ind_handler_rh(soc->dp_soc,
								peer_id, tid,
								win_sz);

			dp_htt_info("DELBA PeerID %d BAW %d TID %d stat %d",
				    peer_id, win_sz, tid, status);
			break;
		}
		case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
		{
			qdf_nbuf_t nbuf_copy;
			HTC_PACKET htc_pkt = {0};

			nbuf_copy = qdf_nbuf_copy(htt_t2h_msg);
			if (qdf_unlikely(!nbuf_copy)) {
				dp_htt_err("NBUF copy failed for PPDU stats msg");
				break;
			}
			htc_pkt.Status = QDF_STATUS_SUCCESS;
			htc_pkt.pPktContext = (void *)nbuf_copy;
			dp_htt_t2h_msg_handler(context, &htc_pkt);
			break;
		}
		case HTT_T2H_MSG_TYPE_FLOW_POOL_MAP:
		{
			uint8_t num_flows;
			struct htt_flow_pool_map_payload_t *pool_map;

			num_flows = HTT_FLOW_POOL_MAP_NUM_FLOWS_GET(*msg_word);

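			/*
			 * Sanity check: the message must be large enough to
			 * hold the one-word header plus num_flows payload
			 * records.
			 */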
			if (((HTT_FLOW_POOL_MAP_PAYLOAD_SZ /
			      HTT_FLOW_POOL_MAP_HEADER_SZ) * num_flows + 1) *
			    sizeof(*msg_word) > msg_len) {
				dp_htt_err("Invalid flow count in flow pool map message");
				WARN_ON(1);
				break;
			}

			msg_word++;

			while (num_flows) {
				pool_map = (struct htt_flow_pool_map_payload_t *)msg_word;
				dp_htt_flow_pool_map_handler_rh(
					soc->dp_soc, pool_map->flow_id,
					pool_map->flow_type,
					pool_map->flow_pool_id,
					pool_map->flow_pool_size);

				msg_word += (HTT_FLOW_POOL_MAP_PAYLOAD_SZ /
					     HTT_FLOW_POOL_MAP_HEADER_SZ);
				num_flows--;
			}

			break;
		}
		case HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP:
		{
			struct htt_flow_pool_unmap_t *pool_unmap;

			if (msg_len < sizeof(struct htt_flow_pool_unmap_t)) {
				dp_htt_err("Invalid length in flow pool unmap message %d", msg_len);
				WARN_ON(1);
				break;
			}

			pool_unmap = (struct htt_flow_pool_unmap_t *)msg_word;
			dp_htt_flow_pool_unmap_handler_rh(
				soc->dp_soc, pool_unmap->flow_id,
				pool_unmap->flow_type,
				pool_unmap->flow_pool_id);
			break;
		}
		default:
		{
			HTC_PACKET htc_pkt = {0};

			htc_pkt.Status = QDF_STATUS_SUCCESS;
			htc_pkt.pPktContext = (void *)htt_t2h_msg;
			/*
			 * Increment the user count to protect the buffer
			 * from being freed by the generic handler; the
			 * count is reset to 1 during MSG_BUF_REINIT.
			 */
			qdf_nbuf_inc_users(htt_t2h_msg);
			dp_htt_t2h_msg_handler(context, &htc_pkt);
			break;
		}
		}

		/* Re-initialize the indication buffer */
		HTT_T2H_MSG_BUF_REINIT(htt_t2h_msg, soc->osdev);
		qdf_nbuf_set_pktlen(htt_t2h_msg, 0);
	}
}

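/**
 * dp_htt_htc_attach() - Connect an HTT service to HTC
 * @soc: Handle to HTT SOC structure
 * @service_id: HTC service id to connect
 *
 * Return: QDF_STATUS_SUCCESS on success, error status on failure
 */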
static QDF_STATUS
dp_htt_htc_attach(struct htt_soc *soc, uint16_t service_id)
{
	struct dp_soc_rh *rh_soc = dp_get_rh_soc_from_dp_soc(soc->dp_soc);
	struct htc_service_connect_req connect;
	struct htc_service_connect_resp response;
	QDF_STATUS status;

	qdf_mem_zero(&connect, sizeof(connect));
	qdf_mem_zero(&response, sizeof(response));

	connect.pMetaData = NULL;
	connect.MetaDataLength = 0;
	connect.EpCallbacks.pContext = soc;
	connect.EpCallbacks.EpTxComplete = dp_htt_h2t_send_complete;
	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
	/* the fastpath handler will be used instead */
	connect.EpCallbacks.EpRecv = NULL;

	/* rx buffers are currently provided by HIF, not by EpRecvRefill */
	connect.EpCallbacks.EpRecvRefill = NULL;
	/* N/A, fill is done by HIF */
	connect.EpCallbacks.RecvRefillWaterMark = 1;

	connect.EpCallbacks.EpSendFull = dp_htt_h2t_full;
	/*
	 * Specify how deep to let a queue get before htc_send_pkt will
	 * call the EpSendFull function due to excessive send queue depth.
	 */
	connect.MaxSendQueueDepth = DP_HTT_MAX_SEND_QUEUE_DEPTH;

	/* disable flow control for the HTT data message service */
	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;

	/* connect to the requested service */
	connect.service_id = service_id;

	status = htc_connect_service(soc->htc_soc, &connect, &response);

	if (status != QDF_STATUS_SUCCESS) {
		dp_htt_err("HTC connect svc failed for id:%u", service_id);
		return status;
	}

	if (service_id == HTT_DATA_MSG_SVC)
		soc->htc_endpoint = response.Endpoint;

	/* save the EP_ID of the TX pipe that is to be used during TX enqueue */
	if (service_id == HTT_DATA2_MSG_SVC)
		rh_soc->tx_endpoint = response.Endpoint;

	return QDF_STATUS_SUCCESS;
}

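/**
 * dp_htt_htc_soc_attach_all() - Connect all HTT data services to HTC
 * @soc: Handle to HTT SOC structure
 *
 * Also registers the T2H high-priority pipe callback and the fastpath
 * T2H message handler for the RX copy engines.
 *
 * Return: QDF_STATUS_SUCCESS on success, error status on failure
 */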
static QDF_STATUS
dp_htt_htc_soc_attach_all(struct htt_soc *soc)
{
	struct dp_soc *dp_soc = soc->dp_soc;
	int svc_list[3] = {HTT_DATA_MSG_SVC, HTT_DATA2_MSG_SVC,
		HTT_DATA3_MSG_SVC};
	QDF_STATUS status;
	int i;

	for (i = 0; i < QDF_ARRAY_SIZE(svc_list); i++) {
		status = dp_htt_htc_attach(soc, svc_list[i]);
		if (QDF_IS_STATUS_ERROR(status))
			return status;
	}

	dp_hif_update_pipe_callback(dp_soc, (void *)soc,
				    dp_htt_hif_t2h_hp_callback,
				    DP_HTT_T2H_HP_PIPE);

	/* register fastpath cb handlers for RX CEs */
	if (hif_ce_fastpath_cb_register(dp_soc->hif_handle,
					dp_htt_t2h_msg_handler_fast, soc)) {
		dp_htt_err("failed to register fastpath callback");
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}


/**
 * dp_htt_soc_initialize_rh() - SOC level HTT initialization
 * @htt_soc: Opaque HTT SOC handle
 * @ctrl_psoc: Opaque ctrl SOC handle
 * @htc_soc: SOC level HTC handle
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @osdev: QDF device
 *
 * Return: HTT handle on success; NULL on failure
 */
void *
dp_htt_soc_initialize_rh(struct htt_soc *htt_soc,
			 struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
			 HTC_HANDLE htc_soc,
			 hal_soc_handle_t hal_soc_hdl, qdf_device_t osdev)
{
	struct htt_soc *soc = (struct htt_soc *)htt_soc;

	soc->osdev = osdev;
	soc->ctrl_psoc = ctrl_psoc;
	soc->htc_soc = htc_soc;
	soc->hal_soc = hal_soc_hdl;

	if (dp_htt_htc_soc_attach_all(soc))
		goto fail;

	return soc;

fail:
	return NULL;
}