// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <crypto/hash.h>
#include "core.h"
#include "dp_tx.h"
#include "hal_tx.h"
#include "hif.h"
#include "debug.h"
#include "dp_rx.h"
#include "peer.h"
#include "dp_mon.h"

enum ath12k_dp_desc_type {
	ATH12K_DP_TX_DESC,
	ATH12K_DP_RX_DESC,
};

static void ath12k_dp_htt_htc_tx_complete(struct ath12k_base *ab,
					  struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

void ath12k_dp_peer_cleanup(struct ath12k *ar, int vdev_id, const u8 *addr)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_peer *peer;

	/* TODO: Any other peer specific DP cleanup */

	spin_lock_bh(&ab->base_lock);
	peer = ath12k_peer_find(ab, vdev_id, addr);
	if (!peer) {
		ath12k_warn(ab, "failed to lookup peer %pM on vdev %d\n",
			    addr, vdev_id);
		spin_unlock_bh(&ab->base_lock);
		return;
	}

	ath12k_dp_rx_peer_tid_cleanup(ar, peer);
	crypto_free_shash(peer->tfm_mmic);
	peer->dp_setup_done = false;
	spin_unlock_bh(&ab->base_lock);
}

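/* Set up the per-peer RX data path: program the default REO destination
 * ring for the peer and allocate one RX TID queue per TID (including the
 * non-QoS TID). In the WMI_PEER_SET_DEFAULT_ROUTING value built below,
 * bit 0 (DP_RX_HASH_ENABLE) turns on hash based routing and the remaining
 * bits carry the 1-based REO destination ring, hence the (reo_dest << 1).
 */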
int ath12k_dp_peer_setup(struct ath12k *ar, int vdev_id, const u8 *addr)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_peer *peer;
	u32 reo_dest;
	int ret = 0, tid;

	/* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
	reo_dest = ar->dp.mac_id + 1;
	ret = ath12k_wmi_set_peer_param(ar, addr, vdev_id,
					WMI_PEER_SET_DEFAULT_ROUTING,
					DP_RX_HASH_ENABLE | (reo_dest << 1));

	if (ret) {
		ath12k_warn(ab, "failed to set default routing %d peer :%pM vdev_id :%d\n",
			    ret, addr, vdev_id);
		return ret;
	}

	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
		ret = ath12k_dp_rx_peer_tid_setup(ar, addr, vdev_id, tid, 1, 0,
						  HAL_PN_TYPE_NONE);
		if (ret) {
			ath12k_warn(ab, "failed to setup rxd tid queue for tid %d: %d\n",
				    tid, ret);
			goto peer_clean;
		}
	}

	ret = ath12k_dp_rx_peer_frag_setup(ar, addr, vdev_id);
	if (ret) {
		ath12k_warn(ab, "failed to setup rx defrag context\n");
		goto peer_clean;
	}

	/* TODO: Setup other peer specific resource used in data path */

	return 0;

peer_clean:
	spin_lock_bh(&ab->base_lock);

	peer = ath12k_peer_find(ab, vdev_id, addr);
	if (!peer) {
		ath12k_warn(ab, "failed to find the peer to del rx tid\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	for (; tid >= 0; tid--)
		ath12k_dp_rx_peer_tid_delete(ar, peer, tid);

	spin_unlock_bh(&ab->base_lock);

	return ret;
}

void ath12k_dp_srng_cleanup(struct ath12k_base *ab, struct dp_srng *ring)
{
	if (!ring->vaddr_unaligned)
		return;

	dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
			  ring->paddr_unaligned);

	ring->vaddr_unaligned = NULL;
}

static int ath12k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask)
{
	int ext_group_num;
	u8 mask = 1 << ring_num;

	for (ext_group_num = 0; ext_group_num < ATH12K_EXT_IRQ_GRP_NUM_MAX;
	     ext_group_num++) {
		if (mask & grp_mask[ext_group_num])
			return ext_group_num;
	}

	return -ENOENT;
}

static int ath12k_dp_srng_calculate_msi_group(struct ath12k_base *ab,
					      enum hal_ring_type type, int ring_num)
{
	const struct ath12k_hal_tcl_to_wbm_rbm_map *map;
	const u8 *grp_mask;
	int i;

	switch (type) {
	case HAL_WBM2SW_RELEASE:
		if (ring_num == HAL_WBM2SW_REL_ERR_RING_NUM) {
			grp_mask = &ab->hw_params->ring_mask->rx_wbm_rel[0];
			ring_num = 0;
		} else {
			map = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map;
			for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
				if (ring_num == map[i].wbm_ring_num) {
					ring_num = i;
					break;
				}
			}

			grp_mask = &ab->hw_params->ring_mask->tx[0];
		}
		break;
	case HAL_REO_EXCEPTION:
		grp_mask = &ab->hw_params->ring_mask->rx_err[0];
		break;
	case HAL_REO_DST:
		grp_mask = &ab->hw_params->ring_mask->rx[0];
		break;
	case HAL_REO_STATUS:
		grp_mask = &ab->hw_params->ring_mask->reo_status[0];
		break;
	case HAL_RXDMA_MONITOR_STATUS:
	case HAL_RXDMA_MONITOR_DST:
		grp_mask = &ab->hw_params->ring_mask->rx_mon_dest[0];
		break;
	case HAL_TX_MONITOR_DST:
		grp_mask = &ab->hw_params->ring_mask->tx_mon_dest[0];
		break;
	case HAL_RXDMA_BUF:
		grp_mask = &ab->hw_params->ring_mask->host2rxdma[0];
		break;
	case HAL_RXDMA_MONITOR_BUF:
	case HAL_TCL_DATA:
	case HAL_TCL_CMD:
	case HAL_REO_CMD:
	case HAL_SW2WBM_RELEASE:
	case HAL_WBM_IDLE_LINK:
	case HAL_TCL_STATUS:
	case HAL_REO_REINJECT:
	case HAL_CE_SRC:
	case HAL_CE_DST:
	case HAL_CE_DST_STATUS:
	default:
		return -ENOENT;
	}

	return ath12k_dp_srng_find_ring_in_mask(ring_num, grp_mask);
}

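/* Derive the MSI address/data for a ring from its external IRQ group.
 * Groups beyond the number of available MSI vectors wrap around via the
 * modulo below, so several groups may legitimately share one vector.
 */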
static void ath12k_dp_srng_msi_setup(struct ath12k_base *ab,
				     struct hal_srng_params *ring_params,
				     enum hal_ring_type type, int ring_num)
{
	int msi_group_number, msi_data_count;
	u32 msi_data_start, msi_irq_start, addr_lo, addr_hi;
	int ret;

	ret = ath12k_hif_get_user_msi_vector(ab, "DP",
					     &msi_data_count, &msi_data_start,
					     &msi_irq_start);
	if (ret)
		return;

	msi_group_number = ath12k_dp_srng_calculate_msi_group(ab, type,
							      ring_num);
	if (msi_group_number < 0) {
		ath12k_dbg(ab, ATH12K_DBG_PCI,
			   "ring not part of an ext_group; ring_type: %d, ring_num: %d",
			   type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		return;
	}

	if (msi_group_number > msi_data_count) {
		ath12k_dbg(ab, ATH12K_DBG_PCI,
			   "multiple msi_groups share one msi, msi_group_num %d",
			   msi_group_number);
	}

	ath12k_hif_get_msi_address(ab, &addr_lo, &addr_hi);

	ring_params->msi_addr = addr_lo;
	ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
	ring_params->msi_data = (msi_group_number % msi_data_count)
				+ msi_data_start;
	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
}

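/* Allocate and register one SRNG with the HAL layer. The ring memory is
 * over-allocated by HAL_RING_BASE_ALIGN - 1 bytes so that an aligned base
 * always fits inside the buffer; PTR_ALIGN() then picks the first aligned
 * virtual address and the physical address is offset by the same amount.
 */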
int ath12k_dp_srng_setup(struct ath12k_base *ab, struct dp_srng *ring,
			 enum hal_ring_type type, int ring_num,
			 int mac_id, int num_entries)
{
	struct hal_srng_params params = { 0 };
	int entry_sz = ath12k_hal_srng_get_entrysize(ab, type);
	int max_entries = ath12k_hal_srng_get_max_entries(ab, type);
	int ret;

	if (max_entries < 0 || entry_sz < 0)
		return -EINVAL;

	if (num_entries > max_entries)
		num_entries = max_entries;

	ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;
	ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
						   &ring->paddr_unaligned,
						   GFP_KERNEL);
	if (!ring->vaddr_unaligned)
		return -ENOMEM;

	ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, HAL_RING_BASE_ALIGN);
	ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr -
		      (unsigned long)ring->vaddr_unaligned);

	params.ring_base_vaddr = ring->vaddr;
	params.ring_base_paddr = ring->paddr;
	params.num_entries = num_entries;
	ath12k_dp_srng_msi_setup(ab, &params, type, ring_num + mac_id);

	switch (type) {
	case HAL_REO_DST:
		params.intr_batch_cntr_thres_entries =
					HAL_SRNG_INT_BATCH_THRESHOLD_RX;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_RXDMA_BUF:
	case HAL_RXDMA_MONITOR_BUF:
	case HAL_RXDMA_MONITOR_STATUS:
		params.low_threshold = num_entries >> 3;
		params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
		params.intr_batch_cntr_thres_entries = 0;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_TX_MONITOR_DST:
		params.low_threshold = DP_TX_MONITOR_BUF_SIZE_MAX >> 3;
		params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
		params.intr_batch_cntr_thres_entries = 0;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_WBM2SW_RELEASE:
		if (ab->hw_params->hw_ops->dp_srng_is_tx_comp_ring(ring_num)) {
			params.intr_batch_cntr_thres_entries =
					HAL_SRNG_INT_BATCH_THRESHOLD_TX;
			params.intr_timer_thres_us =
					HAL_SRNG_INT_TIMER_THRESHOLD_TX;
			break;
		}
		/* fall through when ring_num == HAL_WBM2SW_REL_ERR_RING_NUM */
		fallthrough;
	case HAL_REO_EXCEPTION:
	case HAL_REO_REINJECT:
	case HAL_REO_CMD:
	case HAL_REO_STATUS:
	case HAL_TCL_DATA:
	case HAL_TCL_CMD:
	case HAL_TCL_STATUS:
	case HAL_WBM_IDLE_LINK:
	case HAL_SW2WBM_RELEASE:
	case HAL_RXDMA_DST:
	case HAL_RXDMA_MONITOR_DST:
	case HAL_RXDMA_MONITOR_DESC:
		params.intr_batch_cntr_thres_entries =
					HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
		break;
	case HAL_RXDMA_DIR_BUF:
		break;
	default:
		ath12k_warn(ab, "Not a valid ring type in dp :%d\n", type);
		return -EINVAL;
	}

	ret = ath12k_hal_srng_setup(ab, type, ring_num, mac_id, &params);
	if (ret < 0) {
		ath12k_warn(ab, "failed to setup srng: %d ring_id %d\n",
			    ret, ring_num);
		return ret;
	}

	ring->ring_id = ret;

	return 0;
}

static
u32 ath12k_dp_tx_get_vdev_bank_config(struct ath12k_base *ab, struct ath12k_vif *arvif)
{
	u32 bank_config = 0;

	/* Only valid for raw frames with HW crypto enabled.
	 * With SW crypto, mac80211 sets key per packet
	 */
	if (arvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW &&
	    test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags))
		bank_config |=
			u32_encode_bits(ath12k_dp_tx_get_encrypt_type(arvif->key_cipher),
					HAL_TX_BANK_CONFIG_ENCRYPT_TYPE);

	bank_config |= u32_encode_bits(arvif->tx_encap_type,
				       HAL_TX_BANK_CONFIG_ENCAP_TYPE);
	bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_SRC_BUFFER_SWAP) |
		       u32_encode_bits(0, HAL_TX_BANK_CONFIG_LINK_META_SWAP) |
		       u32_encode_bits(0, HAL_TX_BANK_CONFIG_EPD);

	/* only valid if idx_lookup_override is not set in tcl_data_cmd */
	bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_INDEX_LOOKUP_EN);

	bank_config |= u32_encode_bits(arvif->hal_addr_search_flags & HAL_TX_ADDRX_EN,
				       HAL_TX_BANK_CONFIG_ADDRX_EN) |
		       u32_encode_bits(!!(arvif->hal_addr_search_flags &
					  HAL_TX_ADDRY_EN),
				       HAL_TX_BANK_CONFIG_ADDRY_EN);

	bank_config |= u32_encode_bits(ieee80211_vif_is_mesh(arvif->vif) ? 3 : 0,
				       HAL_TX_BANK_CONFIG_MESH_EN) |
		       u32_encode_bits(arvif->vdev_id_check_en,
				       HAL_TX_BANK_CONFIG_VDEV_ID_CHECK_EN);

	bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_DSCP_TIP_MAP_ID);

	return bank_config;
}

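/* TX bank profiles cache per-vdev TCL bank register configurations so that
 * vdevs with identical settings share one hardware bank. The lookup below
 * first tries to match an already configured bank_config and bumps its
 * num_users refcount; failing that, it claims the first unconfigured (or
 * no longer referenced) slot and programs the bank register.
 */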
static int ath12k_dp_tx_get_bank_profile(struct ath12k_base *ab, struct ath12k_vif *arvif,
					 struct ath12k_dp *dp)
{
	int bank_id = DP_INVALID_BANK_ID;
	int i;
	u32 bank_config;
	bool configure_register = false;

	/* convert vdev params into hal_tx_bank_config */
	bank_config = ath12k_dp_tx_get_vdev_bank_config(ab, arvif);

	spin_lock_bh(&dp->tx_bank_lock);
	/* TODO: implement using idr kernel framework */
	for (i = 0; i < dp->num_bank_profiles; i++) {
		if (dp->bank_profiles[i].is_configured &&
		    (dp->bank_profiles[i].bank_config ^ bank_config) == 0) {
			bank_id = i;
			goto inc_ref_and_return;
		}
		if (!dp->bank_profiles[i].is_configured ||
		    !dp->bank_profiles[i].num_users) {
			bank_id = i;
			goto configure_and_return;
		}
	}

	if (bank_id == DP_INVALID_BANK_ID) {
		spin_unlock_bh(&dp->tx_bank_lock);
		ath12k_err(ab, "unable to find TX bank!");
		return bank_id;
	}

configure_and_return:
	dp->bank_profiles[bank_id].is_configured = true;
	dp->bank_profiles[bank_id].bank_config = bank_config;
	configure_register = true;
inc_ref_and_return:
	dp->bank_profiles[bank_id].num_users++;
	spin_unlock_bh(&dp->tx_bank_lock);

	if (configure_register)
		ath12k_hal_tx_configure_bank_register(ab, bank_config, bank_id);

	ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt tcl bank_id %d input 0x%x match 0x%x num_users %u",
		   bank_id, bank_config, dp->bank_profiles[bank_id].bank_config,
		   dp->bank_profiles[bank_id].num_users);

	return bank_id;
}

void ath12k_dp_tx_put_bank_profile(struct ath12k_dp *dp, u8 bank_id)
{
	spin_lock_bh(&dp->tx_bank_lock);
	dp->bank_profiles[bank_id].num_users--;
	spin_unlock_bh(&dp->tx_bank_lock);
}

static void ath12k_dp_deinit_bank_profiles(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;

	kfree(dp->bank_profiles);
	dp->bank_profiles = NULL;
}

static int ath12k_dp_init_bank_profiles(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	u32 num_tcl_banks = ab->hw_params->num_tcl_banks;
	int i;

	dp->num_bank_profiles = num_tcl_banks;
	dp->bank_profiles = kmalloc_array(num_tcl_banks,
					  sizeof(struct ath12k_dp_tx_bank_profile),
					  GFP_KERNEL);
	if (!dp->bank_profiles)
		return -ENOMEM;

	spin_lock_init(&dp->tx_bank_lock);

	for (i = 0; i < num_tcl_banks; i++) {
		dp->bank_profiles[i].is_configured = false;
		dp->bank_profiles[i].num_users = 0;
	}

	return 0;
}

static void ath12k_dp_srng_common_cleanup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	int i;

	ath12k_dp_srng_cleanup(ab, &dp->reo_status_ring);
	ath12k_dp_srng_cleanup(ab, &dp->reo_cmd_ring);
	ath12k_dp_srng_cleanup(ab, &dp->reo_except_ring);
	ath12k_dp_srng_cleanup(ab, &dp->rx_rel_ring);
	ath12k_dp_srng_cleanup(ab, &dp->reo_reinject_ring);
	for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
		ath12k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
		ath12k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
	}
	ath12k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
}

static int ath12k_dp_srng_common_setup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	const struct ath12k_hal_tcl_to_wbm_rbm_map *map;
	struct hal_srng *srng;
	int i, ret, tx_comp_ring_num;
	u32 ring_hash_map;

	ret = ath12k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
				   HAL_SW2WBM_RELEASE, 0, 0,
				   DP_WBM_RELEASE_RING_SIZE);
	if (ret) {
		ath12k_warn(ab, "failed to set up wbm2sw_release ring :%d\n",
			    ret);
		goto err;
	}

	for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
		map = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map;
		tx_comp_ring_num = map[i].wbm_ring_num;

		ret = ath12k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
					   HAL_TCL_DATA, i, 0,
					   DP_TCL_DATA_RING_SIZE);
		if (ret) {
			ath12k_warn(ab, "failed to set up tcl_data ring (%d) :%d\n",
				    i, ret);
			goto err;
		}

		ret = ath12k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
					   HAL_WBM2SW_RELEASE, tx_comp_ring_num, 0,
					   DP_TX_COMP_RING_SIZE);
		if (ret) {
			ath12k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n",
				    tx_comp_ring_num, ret);
			goto err;
		}
	}

	ret = ath12k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT,
				   0, 0, DP_REO_REINJECT_RING_SIZE);
	if (ret) {
		ath12k_warn(ab, "failed to set up reo_reinject ring :%d\n",
			    ret);
		goto err;
	}

	ret = ath12k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
				   HAL_WBM2SW_REL_ERR_RING_NUM, 0,
				   DP_RX_RELEASE_RING_SIZE);
	if (ret) {
		ath12k_warn(ab, "failed to set up rx_rel ring :%d\n", ret);
		goto err;
	}

	ret = ath12k_dp_srng_setup(ab, &dp->reo_except_ring, HAL_REO_EXCEPTION,
				   0, 0, DP_REO_EXCEPTION_RING_SIZE);
	if (ret) {
		ath12k_warn(ab, "failed to set up reo_exception ring :%d\n",
			    ret);
		goto err;
	}

	ret = ath12k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD,
				   0, 0, DP_REO_CMD_RING_SIZE);
	if (ret) {
		ath12k_warn(ab, "failed to set up reo_cmd ring :%d\n", ret);
		goto err;
	}

	srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
	ath12k_hal_reo_init_cmd_ring(ab, srng);

	ret = ath12k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS,
				   0, 0, DP_REO_STATUS_RING_SIZE);
	if (ret) {
		ath12k_warn(ab, "failed to set up reo_status ring :%d\n", ret);
		goto err;
	}

	/* When hash based routing of rx packet is enabled, 32 entries to map
	 * the hash values to the ring will be configured. Each hash entry uses
	 * four bits to map to a particular ring. The ring mapping will be
	 * 0:TCL, 1:SW1, 2:SW2, 3:SW3, 4:SW4, 5:Release, 6:FW, 7:SW5,
	 * 8:SW6, 9:SW7, 10:SW8, 11:Not used.
	 */
	ring_hash_map = HAL_HASH_ROUTING_RING_SW1 |
			HAL_HASH_ROUTING_RING_SW2 << 4 |
			HAL_HASH_ROUTING_RING_SW3 << 8 |
			HAL_HASH_ROUTING_RING_SW4 << 12 |
			HAL_HASH_ROUTING_RING_SW1 << 16 |
			HAL_HASH_ROUTING_RING_SW2 << 20 |
			HAL_HASH_ROUTING_RING_SW3 << 24 |
			HAL_HASH_ROUTING_RING_SW4 << 28;

	ath12k_hal_reo_hw_setup(ab, ring_hash_map);

	return 0;

err:
	ath12k_dp_srng_common_cleanup(ab);

	return ret;
}

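/* WBM idle link descriptor setup. When the total link descriptor memory
 * exceeds what a single DMA allocation (DP_LINK_DESC_ALLOC_SIZE_THRESH)
 * can describe, the idle list is handed to the hardware as a set of
 * scatter buffers, each holding up to n_entries_per_buf link descriptor
 * pointers.
 */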
static void ath12k_dp_scatter_idle_link_desc_cleanup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
	int i;

	for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) {
		if (!slist[i].vaddr)
			continue;

		dma_free_coherent(ab->dev, HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
				  slist[i].vaddr, slist[i].paddr);
		slist[i].vaddr = NULL;
	}
}

static int ath12k_dp_scatter_idle_link_desc_setup(struct ath12k_base *ab,
						  int size,
						  u32 n_link_desc_bank,
						  u32 n_link_desc,
						  u32 last_bank_sz)
{
	struct ath12k_dp *dp = &ab->dp;
	struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
	u32 n_entries_per_buf;
	int num_scatter_buf, scatter_idx;
	struct hal_wbm_link_desc *scatter_buf;
	int align_bytes, n_entries;
	dma_addr_t paddr;
	int rem_entries;
	int i;
	int ret = 0;
	u32 end_offset, cookie;
	enum hal_rx_buf_return_buf_manager rbm = dp->idle_link_rbm;

	n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
		ath12k_hal_srng_get_entrysize(ab, HAL_WBM_IDLE_LINK);
	num_scatter_buf = DIV_ROUND_UP(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);

	if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
		return -EINVAL;

	for (i = 0; i < num_scatter_buf; i++) {
		slist[i].vaddr = dma_alloc_coherent(ab->dev,
						    HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
						    &slist[i].paddr, GFP_KERNEL);
		if (!slist[i].vaddr) {
			ret = -ENOMEM;
			goto err;
		}
	}

	scatter_idx = 0;
	scatter_buf = slist[scatter_idx].vaddr;
	rem_entries = n_entries_per_buf;

	for (i = 0; i < n_link_desc_bank; i++) {
		align_bytes = link_desc_banks[i].vaddr -
			      link_desc_banks[i].vaddr_unaligned;
		n_entries = (DP_LINK_DESC_ALLOC_SIZE_THRESH - align_bytes) /
			    HAL_LINK_DESC_SIZE;
		paddr = link_desc_banks[i].paddr;
		while (n_entries) {
			cookie = DP_LINK_DESC_COOKIE_SET(n_entries, i);
			ath12k_hal_set_link_desc_addr(scatter_buf, cookie,
						      paddr, rbm);
			n_entries--;
			paddr += HAL_LINK_DESC_SIZE;
			if (rem_entries) {
				rem_entries--;
				scatter_buf++;
				continue;
			}

			rem_entries = n_entries_per_buf;
			scatter_idx++;
			scatter_buf = slist[scatter_idx].vaddr;
		}
	}

	end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
		     sizeof(struct hal_wbm_link_desc);
	ath12k_hal_setup_link_idle_list(ab, slist, num_scatter_buf,
					n_link_desc, end_offset);

	return 0;

err:
	ath12k_dp_scatter_idle_link_desc_cleanup(ab);

	return ret;
}

static void
ath12k_dp_link_desc_bank_free(struct ath12k_base *ab,
			      struct dp_link_desc_bank *link_desc_banks)
{
	int i;

	for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
		if (link_desc_banks[i].vaddr_unaligned) {
			dma_free_coherent(ab->dev,
					  link_desc_banks[i].size,
					  link_desc_banks[i].vaddr_unaligned,
					  link_desc_banks[i].paddr_unaligned);
			link_desc_banks[i].vaddr_unaligned = NULL;
		}
	}
}

static int ath12k_dp_link_desc_bank_alloc(struct ath12k_base *ab,
					  struct dp_link_desc_bank *desc_bank,
					  int n_link_desc_bank,
					  int last_bank_sz)
{
	struct ath12k_dp *dp = &ab->dp;
	int i;
	int ret = 0;
	int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;

	for (i = 0; i < n_link_desc_bank; i++) {
		if (i == (n_link_desc_bank - 1) && last_bank_sz)
			desc_sz = last_bank_sz;

		desc_bank[i].vaddr_unaligned =
			dma_alloc_coherent(ab->dev, desc_sz,
					   &desc_bank[i].paddr_unaligned,
					   GFP_KERNEL);
		if (!desc_bank[i].vaddr_unaligned) {
			ret = -ENOMEM;
			goto err;
		}

		desc_bank[i].vaddr = PTR_ALIGN(desc_bank[i].vaddr_unaligned,
					       HAL_LINK_DESC_ALIGN);
		desc_bank[i].paddr = desc_bank[i].paddr_unaligned +
				     ((unsigned long)desc_bank[i].vaddr -
				      (unsigned long)desc_bank[i].vaddr_unaligned);
		desc_bank[i].size = desc_sz;
	}

	return 0;

err:
	ath12k_dp_link_desc_bank_free(ab, dp->link_desc_banks);

	return ret;
}

void ath12k_dp_link_desc_cleanup(struct ath12k_base *ab,
				 struct dp_link_desc_bank *desc_bank,
				 u32 ring_type, struct dp_srng *ring)
{
	ath12k_dp_link_desc_bank_free(ab, desc_bank);

	if (ring_type != HAL_RXDMA_MONITOR_DESC) {
		ath12k_dp_srng_cleanup(ab, ring);
		ath12k_dp_scatter_idle_link_desc_cleanup(ab);
	}
}

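/* Size the WBM idle link ring from the worst-case descriptor demand:
 * MPDU link and queue descriptors plus TX/RX MSDU link descriptors. The
 * sum is then rounded up to the next power of two when it is not one
 * already; e.g. a total of 0x1234 gives fls() == 13 and is rounded up to
 * 1 << 13 == 0x2000 entries.
 */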
static int ath12k_wbm_idle_ring_setup(struct ath12k_base *ab, u32 *n_link_desc)
{
	struct ath12k_dp *dp = &ab->dp;
	u32 n_mpdu_link_desc, n_mpdu_queue_desc;
	u32 n_tx_msdu_link_desc, n_rx_msdu_link_desc;
	int ret = 0;

	n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) /
			   HAL_NUM_MPDUS_PER_LINK_DESC;

	n_mpdu_queue_desc = n_mpdu_link_desc /
			    HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC;

	n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID *
			       DP_AVG_MSDUS_PER_FLOW) /
			      HAL_NUM_TX_MSDUS_PER_LINK_DESC;

	n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX *
			       DP_AVG_MSDUS_PER_MPDU) /
			      HAL_NUM_RX_MSDUS_PER_LINK_DESC;

	*n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc +
		       n_tx_msdu_link_desc + n_rx_msdu_link_desc;

	if (*n_link_desc & (*n_link_desc - 1))
		*n_link_desc = 1 << fls(*n_link_desc);

	ret = ath12k_dp_srng_setup(ab, &dp->wbm_idle_ring,
				   HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc);
	if (ret) {
		ath12k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
		return ret;
	}
	return ret;
}

int ath12k_dp_link_desc_setup(struct ath12k_base *ab,
			      struct dp_link_desc_bank *link_desc_banks,
			      u32 ring_type, struct hal_srng *srng,
			      u32 n_link_desc)
{
	u32 tot_mem_sz;
	u32 n_link_desc_bank, last_bank_sz;
	u32 entry_sz, align_bytes, n_entries;
	struct hal_wbm_link_desc *desc;
	u32 paddr;
	int i, ret;
	u32 cookie;
	enum hal_rx_buf_return_buf_manager rbm = ab->dp.idle_link_rbm;

	tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
	tot_mem_sz += HAL_LINK_DESC_ALIGN;

	if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) {
		n_link_desc_bank = 1;
		last_bank_sz = tot_mem_sz;
	} else {
		n_link_desc_bank = tot_mem_sz /
				   (DP_LINK_DESC_ALLOC_SIZE_THRESH -
				    HAL_LINK_DESC_ALIGN);
		last_bank_sz = tot_mem_sz %
			       (DP_LINK_DESC_ALLOC_SIZE_THRESH -
				HAL_LINK_DESC_ALIGN);

		if (last_bank_sz)
			n_link_desc_bank += 1;
	}

	if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
		return -EINVAL;

	ret = ath12k_dp_link_desc_bank_alloc(ab, link_desc_banks,
					     n_link_desc_bank, last_bank_sz);
	if (ret)
		return ret;

	/* Setup link desc idle list for HW internal usage */
	entry_sz = ath12k_hal_srng_get_entrysize(ab, ring_type);
	tot_mem_sz = entry_sz * n_link_desc;

	/* Setup scatter desc list when the total memory requirement is more */
	if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
	    ring_type != HAL_RXDMA_MONITOR_DESC) {
		ret = ath12k_dp_scatter_idle_link_desc_setup(ab, tot_mem_sz,
							     n_link_desc_bank,
							     n_link_desc,
							     last_bank_sz);
		if (ret) {
			ath12k_warn(ab, "failed to setup scatter idle list descriptor: %d\n",
				    ret);
			goto fail_desc_bank_free;
		}

		return 0;
	}

	spin_lock_bh(&srng->lock);

	ath12k_hal_srng_access_begin(ab, srng);

	for (i = 0; i < n_link_desc_bank; i++) {
		align_bytes = link_desc_banks[i].vaddr -
			      link_desc_banks[i].vaddr_unaligned;
		n_entries = (link_desc_banks[i].size - align_bytes) /
			    HAL_LINK_DESC_SIZE;
		paddr = link_desc_banks[i].paddr;
		while (n_entries &&
		       (desc = ath12k_hal_srng_src_get_next_entry(ab, srng))) {
			cookie = DP_LINK_DESC_COOKIE_SET(n_entries, i);
			ath12k_hal_set_link_desc_addr(desc, cookie, paddr, rbm);
			n_entries--;
			paddr += HAL_LINK_DESC_SIZE;
		}
	}

	ath12k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return 0;

fail_desc_bank_free:
	ath12k_dp_link_desc_bank_free(ab, link_desc_banks);

	return ret;
}

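/* Main NAPI service loop for one external IRQ group. Each handler below is
 * gated on the group's ring mask from hw_params; the RX style handlers
 * consume from the shared NAPI budget, and processing stops for this poll
 * as soon as the budget is exhausted.
 */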
int ath12k_dp_service_srng(struct ath12k_base *ab,
			   struct ath12k_ext_irq_grp *irq_grp,
			   int budget)
{
	struct napi_struct *napi = &irq_grp->napi;
	int grp_id = irq_grp->grp_id;
	int work_done = 0;
	int i = 0, j;
	int tot_work_done = 0;
	enum dp_monitor_mode monitor_mode;
	u8 ring_mask;

	if (ab->hw_params->ring_mask->tx[grp_id]) {
		i = fls(ab->hw_params->ring_mask->tx[grp_id]) - 1;
		ath12k_dp_tx_completion_handler(ab, i);
	}

	if (ab->hw_params->ring_mask->rx_err[grp_id]) {
		work_done = ath12k_dp_rx_process_err(ab, napi, budget);
		budget -= work_done;
		tot_work_done += work_done;
		if (budget <= 0)
			goto done;
	}

	if (ab->hw_params->ring_mask->rx_wbm_rel[grp_id]) {
		work_done = ath12k_dp_rx_process_wbm_err(ab,
							 napi,
							 budget);
		budget -= work_done;
		tot_work_done += work_done;

		if (budget <= 0)
			goto done;
	}

	if (ab->hw_params->ring_mask->rx[grp_id]) {
		i = fls(ab->hw_params->ring_mask->rx[grp_id]) - 1;
		work_done = ath12k_dp_rx_process(ab, i, napi,
						 budget);
		budget -= work_done;
		tot_work_done += work_done;
		if (budget <= 0)
			goto done;
	}

	if (ab->hw_params->ring_mask->rx_mon_dest[grp_id]) {
		monitor_mode = ATH12K_DP_RX_MONITOR_MODE;
		ring_mask = ab->hw_params->ring_mask->rx_mon_dest[grp_id];
		for (i = 0; i < ab->num_radios; i++) {
			for (j = 0; j < ab->hw_params->num_rxdma_per_pdev; j++) {
				int id = i * ab->hw_params->num_rxdma_per_pdev + j;

				if (ring_mask & BIT(id)) {
					work_done =
					ath12k_dp_mon_process_ring(ab, id, napi, budget,
								   monitor_mode);
					budget -= work_done;
					tot_work_done += work_done;

					if (budget <= 0)
						goto done;
				}
			}
		}
	}

	if (ab->hw_params->ring_mask->tx_mon_dest[grp_id]) {
		monitor_mode = ATH12K_DP_TX_MONITOR_MODE;
		ring_mask = ab->hw_params->ring_mask->tx_mon_dest[grp_id];
		for (i = 0; i < ab->num_radios; i++) {
			for (j = 0; j < ab->hw_params->num_rxdma_per_pdev; j++) {
				int id = i * ab->hw_params->num_rxdma_per_pdev + j;

				if (ring_mask & BIT(id)) {
					work_done =
					ath12k_dp_mon_process_ring(ab, id, napi, budget,
								   monitor_mode);
					budget -= work_done;
					tot_work_done += work_done;

					if (budget <= 0)
						goto done;
				}
			}
		}
	}

	if (ab->hw_params->ring_mask->reo_status[grp_id])
		ath12k_dp_rx_process_reo_status(ab);

	if (ab->hw_params->ring_mask->host2rxdma[grp_id]) {
		struct ath12k_dp *dp = &ab->dp;
		struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
		LIST_HEAD(list);

		ath12k_dp_rx_bufs_replenish(ab, rx_ring, &list, 0);
	}

	/* TODO: Implement handler for other interrupts */

done:
	return tot_work_done;
}

void ath12k_dp_pdev_free(struct ath12k_base *ab)
{
	int i;

	del_timer_sync(&ab->mon_reap_timer);

	for (i = 0; i < ab->num_radios; i++)
		ath12k_dp_rx_pdev_free(ab, i);
}

void ath12k_dp_pdev_pre_alloc(struct ath12k_base *ab)
{
	struct ath12k *ar;
	struct ath12k_pdev_dp *dp;
	int i;

	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		dp = &ar->dp;
		dp->mac_id = i;
		atomic_set(&dp->num_tx_pending, 0);
		init_waitqueue_head(&dp->tx_empty_waitq);

		/* TODO: Add any RXDMA setup required per pdev */
	}
}

bool ath12k_dp_wmask_compaction_rx_tlv_supported(struct ath12k_base *ab)
{
	if (test_bit(WMI_TLV_SERVICE_WMSK_COMPACTION_RX_TLVS, ab->wmi_ab.svc_map) &&
	    ab->hw_params->hal_ops->rxdma_ring_wmask_rx_mpdu_start &&
	    ab->hw_params->hal_ops->rxdma_ring_wmask_rx_msdu_end &&
	    ab->hw_params->hal_ops->get_hal_rx_compact_ops) {
		return true;
	}
	return false;
}

void ath12k_dp_hal_rx_desc_init(struct ath12k_base *ab)
{
	if (ath12k_dp_wmask_compaction_rx_tlv_supported(ab)) {
		/* RX TLVS compaction is supported, hence change the hal_rx_ops
		 * to compact hal_rx_ops.
		 */
		ab->hal_rx_ops = ab->hw_params->hal_ops->get_hal_rx_compact_ops();
	}
	ab->hal.hal_desc_sz =
		ab->hal_rx_ops->rx_desc_get_desc_size();
}

static void ath12k_dp_service_mon_ring(struct timer_list *t)
{
	struct ath12k_base *ab = from_timer(ab, t, mon_reap_timer);
	int i;

	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++)
		ath12k_dp_mon_process_ring(ab, i, NULL, DP_MON_SERVICE_BUDGET,
					   ATH12K_DP_RX_MONITOR_MODE);

	mod_timer(&ab->mon_reap_timer, jiffies +
		  msecs_to_jiffies(ATH12K_MON_TIMER_INTERVAL));
}

static void ath12k_dp_mon_reap_timer_init(struct ath12k_base *ab)
{
	if (ab->hw_params->rxdma1_enable)
		return;

	timer_setup(&ab->mon_reap_timer, ath12k_dp_service_mon_ring, 0);
}

int ath12k_dp_pdev_alloc(struct ath12k_base *ab)
{
	struct ath12k *ar;
	int ret;
	int i;

	ret = ath12k_dp_rx_htt_setup(ab);
	if (ret)
		goto out;

	ath12k_dp_mon_reap_timer_init(ab);

	/* TODO: Per-pdev rx ring unlike tx ring which is mapped to different AC's */
	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		ret = ath12k_dp_rx_pdev_alloc(ab, i);
		if (ret) {
			ath12k_warn(ab, "failed to allocate pdev rx for pdev_id :%d\n",
				    i);
			goto err;
		}
		ret = ath12k_dp_rx_pdev_mon_attach(ar);
		if (ret) {
			ath12k_warn(ab, "failed to initialize mon pdev %d\n", i);
			goto err;
		}
	}

	return 0;
err:
	ath12k_dp_pdev_free(ab);
out:
	return ret;
}

int ath12k_dp_htt_connect(struct ath12k_dp *dp)
{
	struct ath12k_htc_svc_conn_req conn_req = {0};
	struct ath12k_htc_svc_conn_resp conn_resp = {0};
	int status;

	conn_req.ep_ops.ep_tx_complete = ath12k_dp_htt_htc_tx_complete;
	conn_req.ep_ops.ep_rx_complete = ath12k_dp_htt_htc_t2h_msg_handler;

	/* connect to control service */
	conn_req.service_id = ATH12K_HTC_SVC_ID_HTT_DATA_MSG;

	status = ath12k_htc_connect_service(&dp->ab->htc, &conn_req,
					    &conn_resp);

	if (status)
		return status;

	dp->eid = conn_resp.eid;

	return 0;
}

static void ath12k_dp_update_vdev_search(struct ath12k_vif *arvif)
{
	switch (arvif->vdev_type) {
	case WMI_VDEV_TYPE_STA:
		/* TODO: Verify the search type and flags since ast hash
		 * is not part of peer mapv3
		 */
		arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
		arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
		break;
	case WMI_VDEV_TYPE_AP:
	case WMI_VDEV_TYPE_IBSS:
		arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
		arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
		break;
	case WMI_VDEV_TYPE_MONITOR:
	default:
		return;
	}
}

void ath12k_dp_vdev_tx_attach(struct ath12k *ar, struct ath12k_vif *arvif)
{
	struct ath12k_base *ab = ar->ab;

	arvif->tcl_metadata |= u32_encode_bits(1, HTT_TCL_META_DATA_TYPE) |
			       u32_encode_bits(arvif->vdev_id,
					       HTT_TCL_META_DATA_VDEV_ID) |
			       u32_encode_bits(ar->pdev->pdev_id,
					       HTT_TCL_META_DATA_PDEV_ID);

	/* set HTT extension valid bit to 0 by default */
	arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;

	ath12k_dp_update_vdev_search(arvif);
	arvif->vdev_id_check_en = true;
	arvif->bank_id = ath12k_dp_tx_get_bank_profile(ab, arvif, &ab->dp);

	/* TODO: error path for bank id failure */
	if (arvif->bank_id == DP_INVALID_BANK_ID) {
		ath12k_err(ar->ab, "Failed to initialize DP TX Banks");
		return;
	}
}

static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
{
	struct ath12k_rx_desc_info *desc_info;
	struct ath12k_tx_desc_info *tx_desc_info, *tmp1;
	struct ath12k_dp *dp = &ab->dp;
	struct ath12k_skb_cb *skb_cb;
	struct sk_buff *skb;
	struct ath12k *ar;
	int i, j;
	u32 pool_id, tx_spt_page;

	if (!dp->spt_info)
		return;

	/* RX Descriptor cleanup */
	spin_lock_bh(&dp->rx_desc_lock);

	for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
		desc_info = dp->spt_info->rxbaddr[i];

		for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
			if (!desc_info[j].in_use) {
				list_del(&desc_info[j].list);
				continue;
			}

			skb = desc_info[j].skb;
			if (!skb)
				continue;

			dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
					 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
		}
	}

	for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
		if (!dp->spt_info->rxbaddr[i])
			continue;

		kfree(dp->spt_info->rxbaddr[i]);
		dp->spt_info->rxbaddr[i] = NULL;
	}

	spin_unlock_bh(&dp->rx_desc_lock);

	/* TX Descriptor cleanup */
	for (i = 0; i < ATH12K_HW_MAX_QUEUES; i++) {
		spin_lock_bh(&dp->tx_desc_lock[i]);

		list_for_each_entry_safe(tx_desc_info, tmp1, &dp->tx_desc_used_list[i],
					 list) {
			list_del(&tx_desc_info->list);
			skb = tx_desc_info->skb;

			if (!skb)
				continue;

			skb_cb = ATH12K_SKB_CB(skb);
			ar = skb_cb->ar;
			if (atomic_dec_and_test(&ar->dp.num_tx_pending))
				wake_up(&ar->dp.tx_empty_waitq);

			dma_unmap_single(ab->dev, ATH12K_SKB_CB(skb)->paddr,
					 skb->len, DMA_TO_DEVICE);
			dev_kfree_skb_any(skb);
		}

		spin_unlock_bh(&dp->tx_desc_lock[i]);
	}

	for (pool_id = 0; pool_id < ATH12K_HW_MAX_QUEUES; pool_id++) {
		spin_lock_bh(&dp->tx_desc_lock[pool_id]);

		for (i = 0; i < ATH12K_TX_SPT_PAGES_PER_POOL; i++) {
			tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL;
			if (!dp->spt_info->txbaddr[tx_spt_page])
				continue;

			kfree(dp->spt_info->txbaddr[tx_spt_page]);
			dp->spt_info->txbaddr[tx_spt_page] = NULL;
		}

		spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
	}

	/* unmap SPT pages */
	for (i = 0; i < dp->num_spt_pages; i++) {
		if (!dp->spt_info[i].vaddr)
			continue;

		dma_free_coherent(ab->dev, ATH12K_PAGE_SIZE,
				  dp->spt_info[i].vaddr, dp->spt_info[i].paddr);
		dp->spt_info[i].vaddr = NULL;
	}

	kfree(dp->spt_info);
}

static void ath12k_dp_reoq_lut_cleanup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;

	if (!ab->hw_params->reoq_lut_support)
		return;

	if (!dp->reoq_lut.vaddr)
		return;

	dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
			  dp->reoq_lut.vaddr, dp->reoq_lut.paddr);
	dp->reoq_lut.vaddr = NULL;

	ath12k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE0(ab), 0);
}

void ath12k_dp_free(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	int i;

	ath12k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);

	ath12k_dp_cc_cleanup(ab);
	ath12k_dp_reoq_lut_cleanup(ab);
	ath12k_dp_deinit_bank_profiles(ab);
	ath12k_dp_srng_common_cleanup(ab);

	ath12k_dp_rx_reo_cmd_list_cleanup(ab);

	for (i = 0; i < ab->hw_params->max_tx_ring; i++)
		kfree(dp->tx_ring[i].tx_status);

	ath12k_dp_rx_free(ab);
	/* Deinit any SOC level resource */
}

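/* Program hardware cookie conversion (CC): REO and WBM are pointed at the
 * primary page table (PPT) base in CMEM so that, as the SW_COOKIE register
 * names below suggest, completions can be translated in hardware from a
 * 32-bit SW cookie to the host address of the matching descriptor instead
 * of the host doing that lookup per packet.
 */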
void ath12k_dp_cc_config(struct ath12k_base *ab)
{
	u32 cmem_base = ab->qmi.dev_mem[ATH12K_QMI_DEVMEM_CMEM_INDEX].start;
	u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
	u32 wbm_base = HAL_SEQ_WCSS_UMAC_WBM_REG;
	u32 val = 0;

	ath12k_hif_write32(ab, reo_base + HAL_REO1_SW_COOKIE_CFG0(ab), cmem_base);

	val |= u32_encode_bits(ATH12K_CMEM_ADDR_MSB,
			       HAL_REO1_SW_COOKIE_CFG_CMEM_BASE_ADDR_MSB) |
	       u32_encode_bits(ATH12K_CC_PPT_MSB,
			       HAL_REO1_SW_COOKIE_CFG_COOKIE_PPT_MSB) |
	       u32_encode_bits(ATH12K_CC_SPT_MSB,
			       HAL_REO1_SW_COOKIE_CFG_COOKIE_SPT_MSB) |
	       u32_encode_bits(1, HAL_REO1_SW_COOKIE_CFG_ALIGN) |
	       u32_encode_bits(1, HAL_REO1_SW_COOKIE_CFG_ENABLE) |
	       u32_encode_bits(1, HAL_REO1_SW_COOKIE_CFG_GLOBAL_ENABLE);

	ath12k_hif_write32(ab, reo_base + HAL_REO1_SW_COOKIE_CFG1(ab), val);

	/* Enable HW CC for WBM */
	ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG0, cmem_base);

	val = u32_encode_bits(ATH12K_CMEM_ADDR_MSB,
			      HAL_WBM_SW_COOKIE_CFG_CMEM_BASE_ADDR_MSB) |
	      u32_encode_bits(ATH12K_CC_PPT_MSB,
			      HAL_WBM_SW_COOKIE_CFG_COOKIE_PPT_MSB) |
	      u32_encode_bits(ATH12K_CC_SPT_MSB,
			      HAL_WBM_SW_COOKIE_CFG_COOKIE_SPT_MSB) |
	      u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_ALIGN);

	ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG1, val);

	/* Enable conversion complete indication */
	val = ath12k_hif_read32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG2);
	val |= u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_RELEASE_PATH_EN) |
	       u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_ERR_PATH_EN) |
	       u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_CONV_IND_EN);

	ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG2, val);

	/* Enable Cookie conversion for WBM2SW Rings */
	val = ath12k_hif_read32(ab, wbm_base + HAL_WBM_SW_COOKIE_CONVERT_CFG);
	val |= u32_encode_bits(1, HAL_WBM_SW_COOKIE_CONV_CFG_GLOBAL_EN) |
	       ab->hw_params->hal_params->wbm2sw_cc_enable;

	ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CONVERT_CFG, val);
}

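/* A CC cookie packs the primary page table index above the secondary page
 * table index: cookie = ppt_idx << ATH12K_CC_PPT_SHIFT | spt_idx. The PPT
 * entry selects one SPT page (4K aligned, per the check in cc_init) and
 * the SPT index selects one descriptor pointer slot within that page.
 */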
static u32 ath12k_dp_cc_cookie_gen(u16 ppt_idx, u16 spt_idx)
{
	return (u32)ppt_idx << ATH12K_CC_PPT_SHIFT | spt_idx;
}

static inline void *ath12k_dp_cc_get_desc_addr_ptr(struct ath12k_base *ab,
						   u16 ppt_idx, u16 spt_idx)
{
	struct ath12k_dp *dp = &ab->dp;

	return dp->spt_info[ppt_idx].vaddr + spt_idx;
}

struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_base *ab,
						  u32 cookie)
{
	struct ath12k_dp *dp = &ab->dp;
	struct ath12k_rx_desc_info **desc_addr_ptr;
	u16 start_ppt_idx, end_ppt_idx, ppt_idx, spt_idx;

	ppt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_PPT);
	spt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_SPT);

	start_ppt_idx = dp->rx_ppt_base + ATH12K_RX_SPT_PAGE_OFFSET;
	end_ppt_idx = start_ppt_idx + ATH12K_NUM_RX_SPT_PAGES;

	if (ppt_idx < start_ppt_idx ||
	    ppt_idx >= end_ppt_idx ||
	    spt_idx > ATH12K_MAX_SPT_ENTRIES)
		return NULL;

	ppt_idx = ppt_idx - dp->rx_ppt_base;
	desc_addr_ptr = ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, spt_idx);

	return *desc_addr_ptr;
}

struct ath12k_tx_desc_info *ath12k_dp_get_tx_desc(struct ath12k_base *ab,
						  u32 cookie)
{
	struct ath12k_tx_desc_info **desc_addr_ptr;
	u16 start_ppt_idx, end_ppt_idx, ppt_idx, spt_idx;

	ppt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_PPT);
	spt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_SPT);

	start_ppt_idx = ATH12K_TX_SPT_PAGE_OFFSET;
	end_ppt_idx = start_ppt_idx +
		      (ATH12K_TX_SPT_PAGES_PER_POOL * ATH12K_HW_MAX_QUEUES);

	if (ppt_idx < start_ppt_idx ||
	    ppt_idx >= end_ppt_idx ||
	    spt_idx > ATH12K_MAX_SPT_ENTRIES)
		return NULL;

	desc_addr_ptr = ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, spt_idx);

	return *desc_addr_ptr;
}

static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	struct ath12k_rx_desc_info *rx_descs, **rx_desc_addr;
	struct ath12k_tx_desc_info *tx_descs, **tx_desc_addr;
	u32 i, j, pool_id, tx_spt_page;
	u32 ppt_idx, cookie_ppt_idx;

	spin_lock_bh(&dp->rx_desc_lock);

	/* First ATH12K_NUM_RX_SPT_PAGES of allocated SPT pages are used for RX */
	for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
		rx_descs = kcalloc(ATH12K_MAX_SPT_ENTRIES, sizeof(*rx_descs),
				   GFP_ATOMIC);

		if (!rx_descs) {
			spin_unlock_bh(&dp->rx_desc_lock);
			return -ENOMEM;
		}

		ppt_idx = ATH12K_RX_SPT_PAGE_OFFSET + i;
		cookie_ppt_idx = dp->rx_ppt_base + ppt_idx;
		dp->spt_info->rxbaddr[i] = &rx_descs[0];

		for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
			rx_descs[j].cookie = ath12k_dp_cc_cookie_gen(cookie_ppt_idx, j);
			rx_descs[j].magic = ATH12K_DP_RX_DESC_MAGIC;
			list_add_tail(&rx_descs[j].list, &dp->rx_desc_free_list);

			/* Update descriptor VA in SPT */
			rx_desc_addr = ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, j);
			*rx_desc_addr = &rx_descs[j];
		}
	}

	spin_unlock_bh(&dp->rx_desc_lock);

	for (pool_id = 0; pool_id < ATH12K_HW_MAX_QUEUES; pool_id++) {
		spin_lock_bh(&dp->tx_desc_lock[pool_id]);
		for (i = 0; i < ATH12K_TX_SPT_PAGES_PER_POOL; i++) {
			tx_descs = kcalloc(ATH12K_MAX_SPT_ENTRIES, sizeof(*tx_descs),
					   GFP_ATOMIC);

			if (!tx_descs) {
				spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
				/* Caller takes care of TX pending and RX desc cleanup */
				return -ENOMEM;
			}

			tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL;
			ppt_idx = ATH12K_TX_SPT_PAGE_OFFSET + tx_spt_page;

			dp->spt_info->txbaddr[tx_spt_page] = &tx_descs[0];

			for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
				tx_descs[j].desc_id = ath12k_dp_cc_cookie_gen(ppt_idx, j);
				tx_descs[j].pool_id = pool_id;
				list_add_tail(&tx_descs[j].list,
					      &dp->tx_desc_free_list[pool_id]);

				/* Update descriptor VA in SPT */
				tx_desc_addr =
					ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, j);
				*tx_desc_addr = &tx_descs[j];
			}
		}
		spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
	}
	return 0;
}

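/* Publish the SPT pages to hardware by writing each page's physical
 * address (4K aligned, hence the right shift) into the matching PPT slot
 * in CMEM. TX and RX use disjoint PPT ranges, so the same routine serves
 * both descriptor types.
 */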
static int ath12k_dp_cmem_init(struct ath12k_base *ab,
			       struct ath12k_dp *dp,
			       enum ath12k_dp_desc_type type)
{
	u32 cmem_base;
	int i, start, end;

	cmem_base = ab->qmi.dev_mem[ATH12K_QMI_DEVMEM_CMEM_INDEX].start;

	switch (type) {
	case ATH12K_DP_TX_DESC:
		start = ATH12K_TX_SPT_PAGE_OFFSET;
		end = start + ATH12K_NUM_TX_SPT_PAGES;
		break;
	case ATH12K_DP_RX_DESC:
		cmem_base += ATH12K_PPT_ADDR_OFFSET(dp->rx_ppt_base);
		start = ATH12K_RX_SPT_PAGE_OFFSET;
		end = start + ATH12K_NUM_RX_SPT_PAGES;
		break;
	default:
		ath12k_err(ab, "invalid descriptor type %d in cmem init\n", type);
		return -EINVAL;
	}

	/* Write to PPT in CMEM */
	for (i = start; i < end; i++)
		ath12k_hif_write32(ab, cmem_base + ATH12K_PPT_ADDR_OFFSET(i),
				   dp->spt_info[i].paddr >> ATH12K_SPT_4K_ALIGN_OFFSET);

	return 0;
}

static int ath12k_dp_cc_init(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	int i, ret = 0;

	INIT_LIST_HEAD(&dp->rx_desc_free_list);
	spin_lock_init(&dp->rx_desc_lock);

	for (i = 0; i < ATH12K_HW_MAX_QUEUES; i++) {
		INIT_LIST_HEAD(&dp->tx_desc_free_list[i]);
		INIT_LIST_HEAD(&dp->tx_desc_used_list[i]);
		spin_lock_init(&dp->tx_desc_lock[i]);
	}

	dp->num_spt_pages = ATH12K_NUM_SPT_PAGES;
	if (dp->num_spt_pages > ATH12K_MAX_PPT_ENTRIES)
		dp->num_spt_pages = ATH12K_MAX_PPT_ENTRIES;

	dp->spt_info = kcalloc(dp->num_spt_pages, sizeof(struct ath12k_spt_info),
			       GFP_KERNEL);

	if (!dp->spt_info) {
		ath12k_warn(ab, "SPT page allocation failure");
		return -ENOMEM;
	}

	dp->rx_ppt_base = ab->device_id * ATH12K_NUM_RX_SPT_PAGES;

	for (i = 0; i < dp->num_spt_pages; i++) {
		dp->spt_info[i].vaddr = dma_alloc_coherent(ab->dev,
							   ATH12K_PAGE_SIZE,
							   &dp->spt_info[i].paddr,
							   GFP_KERNEL);

		if (!dp->spt_info[i].vaddr) {
			ret = -ENOMEM;
			goto free;
		}

		if (dp->spt_info[i].paddr & ATH12K_SPT_4K_ALIGN_CHECK) {
			ath12k_warn(ab, "SPT allocated memory is not 4K aligned");
			ret = -EINVAL;
			goto free;
		}
	}

	ret = ath12k_dp_cmem_init(ab, dp, ATH12K_DP_TX_DESC);
	if (ret) {
		ath12k_warn(ab, "HW CC Tx cmem init failed %d", ret);
		goto free;
	}

	ret = ath12k_dp_cmem_init(ab, dp, ATH12K_DP_RX_DESC);
	if (ret) {
		ath12k_warn(ab, "HW CC Rx cmem init failed %d", ret);
		goto free;
	}

	ret = ath12k_dp_cc_desc_init(ab);
	if (ret) {
		ath12k_warn(ab, "HW CC desc init failed %d", ret);
		goto free;
	}

	return 0;
free:
	ath12k_dp_cc_cleanup(ab);
	return ret;
}

static int ath12k_dp_reoq_lut_setup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;

	if (!ab->hw_params->reoq_lut_support)
		return 0;

	dp->reoq_lut.vaddr = dma_alloc_coherent(ab->dev,
						DP_REOQ_LUT_SIZE,
						&dp->reoq_lut.paddr,
						GFP_KERNEL | __GFP_ZERO);
	if (!dp->reoq_lut.vaddr) {
		ath12k_warn(ab, "failed to allocate memory for reoq table");
		return -ENOMEM;
	}

	ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE0(ab),
			   dp->reoq_lut.paddr);
	return 0;
}

static enum hal_rx_buf_return_buf_manager
ath12k_dp_get_idle_link_rbm(struct ath12k_base *ab)
{
	switch (ab->device_id) {
	case 0:
		return HAL_RX_BUF_RBM_WBM_DEV0_IDLE_DESC_LIST;
	case 1:
		return HAL_RX_BUF_RBM_WBM_DEV1_IDLE_DESC_LIST;
	case 2:
		return HAL_RX_BUF_RBM_WBM_DEV2_IDLE_DESC_LIST;
	default:
		ath12k_warn(ab, "invalid device id %d, falling back to default rbm\n",
			    ab->device_id);
		WARN_ON(1);
		return HAL_RX_BUF_RBM_WBM_DEV0_IDLE_DESC_LIST;
	}
}

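/* Top-level DP allocation. Ordering matters here: the WBM idle ring and
 * link descriptor banks come first, then cookie conversion, bank profiles,
 * the common SRNGs, the REO queue LUT and finally the per-ring TX status
 * bookkeeping and RX allocations. The error labels below unwind in exactly
 * the reverse order.
 */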
int ath12k_dp_alloc(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	struct hal_srng *srng = NULL;
	size_t size = 0;
	u32 n_link_desc = 0;
	int ret;
	int i;

	dp->ab = ab;

	INIT_LIST_HEAD(&dp->reo_cmd_list);
	INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
	spin_lock_init(&dp->reo_cmd_lock);

	dp->reo_cmd_cache_flush_count = 0;
	dp->idle_link_rbm = ath12k_dp_get_idle_link_rbm(ab);

	ret = ath12k_wbm_idle_ring_setup(ab, &n_link_desc);
	if (ret) {
		ath12k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
		return ret;
	}

	srng = &ab->hal.srng_list[dp->wbm_idle_ring.ring_id];

	ret = ath12k_dp_link_desc_setup(ab, dp->link_desc_banks,
					HAL_WBM_IDLE_LINK, srng, n_link_desc);
	if (ret) {
		ath12k_warn(ab, "failed to setup link desc: %d\n", ret);
		return ret;
	}

	ret = ath12k_dp_cc_init(ab);
	if (ret) {
		ath12k_warn(ab, "failed to setup cookie converter %d\n", ret);
		goto fail_link_desc_cleanup;
	}

	ret = ath12k_dp_init_bank_profiles(ab);
	if (ret) {
		ath12k_warn(ab, "failed to setup bank profiles %d\n", ret);
		goto fail_hw_cc_cleanup;
	}

	ret = ath12k_dp_srng_common_setup(ab);
	if (ret)
		goto fail_dp_bank_profiles_cleanup;

	size = sizeof(struct hal_wbm_release_ring_tx) * DP_TX_COMP_RING_SIZE;

	ret = ath12k_dp_reoq_lut_setup(ab);
	if (ret) {
		ath12k_warn(ab, "failed to setup reoq table %d\n", ret);
		goto fail_cmn_srng_cleanup;
	}

	for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
		dp->tx_ring[i].tcl_data_ring_id = i;

		dp->tx_ring[i].tx_status_head = 0;
		dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
		dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
		if (!dp->tx_ring[i].tx_status) {
			ret = -ENOMEM;
			/* FIXME: The allocated tx status is not freed
			 * properly here
			 */
			goto fail_cmn_reoq_cleanup;
		}
	}

	for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
		ath12k_hal_tx_set_dscp_tid_map(ab, i);

	ret = ath12k_dp_rx_alloc(ab);
	if (ret)
		goto fail_dp_rx_free;

	/* Init any SOC level resource for DP */

	return 0;

fail_dp_rx_free:
	ath12k_dp_rx_free(ab);

fail_cmn_reoq_cleanup:
	ath12k_dp_reoq_lut_cleanup(ab);

fail_cmn_srng_cleanup:
	ath12k_dp_srng_common_cleanup(ab);

fail_dp_bank_profiles_cleanup:
	ath12k_dp_deinit_bank_profiles(ab);

fail_hw_cc_cleanup:
	ath12k_dp_cc_cleanup(ab);

fail_link_desc_cleanup:
	ath12k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);

	return ret;
}