/*
 * Copyright (c) 2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_rx_buffer_pool.h"

/* Target number of pre-allocated emergency nbufs kept per MAC pool.
 * May be overridden at build time.
 */
#ifndef DP_RX_BUFFER_POOL_SIZE
#define DP_RX_BUFFER_POOL_SIZE 128
#endif

/* Number of consecutive qdf_nbuf_alloc() failures tolerated before
 * falling back to the emergency pool in dp_rx_buffer_pool_nbuf_alloc().
 * May be overridden at build time.
 */
#ifndef DP_RX_BUFFER_POOL_ALLOC_THRES
#define DP_RX_BUFFER_POOL_ALLOC_THRES 1
#endif

#ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
/**
 * dp_rx_buffer_pool_refill() - try to grow the emergency pool and, on
 *	allocation failure, consume @nbuf (and its chain) into the pool
 * @soc: DP SOC context
 * @nbuf: head of the received MSDU chain being considered for consumption
 * @mac_id: LMAC id identifying the per-MAC descriptor/buffer pools
 *
 * For each buffer in the chain, attempts a fresh allocation to top up the
 * emergency queue (stops once the queue holds DP_RX_BUFFER_POOL_SIZE
 * buffers). If a fresh allocation fails, the whole original chain is
 * recycled into the pool via dp_rx_buffer_pool_nbuf_free() instead of
 * being delivered, and the function reports it as consumed.
 *
 * Return: true if @nbuf was consumed by the pool (caller must not use it
 *	   afterwards), false otherwise.
 */
bool dp_rx_buffer_pool_refill(struct dp_soc *soc, qdf_nbuf_t nbuf, u8 mac_id)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
	struct rx_buff_pool *bufpool = &soc->rx_buff_pool[mac_id];
	qdf_nbuf_t next_nbuf, first_nbuf, refill_nbuf;
	bool consumed = false;

	if (!bufpool->is_initialized)
		return consumed;

	/* process only buffers of RXDMA ring */
	if (qdf_unlikely(rx_desc_pool !=
			 dp_rx_get_mon_desc_pool(soc, mac_id, pdev->pdev_id)))
		return consumed;

	/* Remember the chain head so it can be walked again below if the
	 * chain ends up being consumed.
	 */
	first_nbuf = nbuf;

	while (nbuf) {
		next_nbuf = qdf_nbuf_next(nbuf);

		/* Pool already full: nothing to top up, chain stays with
		 * the caller (consumed remains false).
		 */
		if (qdf_likely(qdf_nbuf_queue_head_qlen(&bufpool->emerg_nbuf_q) >=
		    DP_RX_BUFFER_POOL_SIZE))
			break;

		refill_nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
					     RX_BUFFER_RESERVATION,
					     rx_desc_pool->buf_alignment,
					     FALSE);

		/* Failed to allocate new nbuf, reset and place it back
		 * in to the pool.
		 */
		if (!refill_nbuf) {
			DP_STATS_INC(pdev,
				     rx_buffer_pool.num_bufs_consumed, 1);
			consumed = true;
			break;
		}

		/* Successful allocation!! */
		DP_STATS_INC(pdev,
			     rx_buffer_pool.num_bufs_alloc_success, 1);
		qdf_nbuf_queue_head_enqueue_tail(&bufpool->emerg_nbuf_q,
						 refill_nbuf);
		nbuf = next_nbuf;
	}

	nbuf = first_nbuf;
	if (consumed) {
		/* Free the MSDU/scattered MSDU */
		while (nbuf) {
			next_nbuf = qdf_nbuf_next(nbuf);
			dp_rx_buffer_pool_nbuf_free(soc, nbuf, mac_id);
			nbuf = next_nbuf;
		}
	}

	return consumed;
}

/**
 * dp_rx_buffer_pool_nbuf_free() - return an nbuf to the emergency pool,
 *	or free it outright if the pool is already full
 * @soc: DP SOC context
 * @nbuf: buffer to recycle or free
 * @mac_id: LMAC id; remapped to pdev->lmac_id when per-pdev LMAC rings
 *	    are not in use
 *
 * NOTE(review): unlike the other entry points, this does not check
 * buff_pool->is_initialized before enqueueing — presumably callers only
 * reach here once the pool exists; confirm against call sites.
 */
void dp_rx_buffer_pool_nbuf_free(struct dp_soc *soc, qdf_nbuf_t nbuf, u8 mac_id)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct rx_desc_pool *rx_desc_pool;
	struct rx_buff_pool *buff_pool;

	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		mac_id = dp_pdev->lmac_id;

	rx_desc_pool = &soc->rx_desc_buf[mac_id];
	buff_pool = &soc->rx_buff_pool[mac_id];

	/* Pool is at capacity: release the buffer instead of hoarding it. */
	if (qdf_likely(qdf_nbuf_queue_head_qlen(&buff_pool->emerg_nbuf_q) >=
		       DP_RX_BUFFER_POOL_SIZE))
		return qdf_nbuf_free(nbuf);

	/* Reset the buffer to a pristine state before parking it in the
	 * emergency queue for later reuse.
	 */
	qdf_nbuf_reset(nbuf, RX_BUFFER_RESERVATION,
		       rx_desc_pool->buf_alignment);
	qdf_nbuf_queue_head_enqueue_tail(&buff_pool->emerg_nbuf_q, nbuf);
}

/**
 * dp_rx_buffer_pool_nbuf_alloc() - allocate an RX buffer, falling back to
 *	the emergency pool when the system allocator is struggling
 * @soc: DP SOC context
 * @mac_id: LMAC id; remapped to pdev->lmac_id when per-pdev LMAC rings
 *	    are not in use
 * @rx_desc_pool: descriptor pool providing buf_size/alignment for the
 *	allocation
 * @num_available_buffers: buffers currently posted to the refill ring;
 *	falling below 10% of the ring size also triggers the pool fallback
 *
 * A successful normal allocation resets the failure counter. The pool is
 * consulted only after DP_RX_BUFFER_POOL_ALLOC_THRES consecutive failures
 * or when the refill ring is running low.
 *
 * Return: an nbuf on success, NULL if both the allocator and the pool
 *	   are exhausted.
 */
qdf_nbuf_t
dp_rx_buffer_pool_nbuf_alloc(struct dp_soc *soc, uint32_t mac_id,
			     struct rx_desc_pool *rx_desc_pool,
			     uint32_t num_available_buffers)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct rx_buff_pool *buff_pool;
	struct dp_srng *dp_rxdma_srng;
	qdf_nbuf_t nbuf;

	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		mac_id = dp_pdev->lmac_id;

	buff_pool = &soc->rx_buff_pool[mac_id];
	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];

	/* Always try the regular allocator first. */
	nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
			      RX_BUFFER_RESERVATION,
			      rx_desc_pool->buf_alignment,
			      FALSE);

	/* No pool to fall back on: return whatever the allocator gave us
	 * (possibly NULL).
	 */
	if (!buff_pool->is_initialized)
		return nbuf;

	if (qdf_likely(nbuf)) {
		buff_pool->nbuf_fail_cnt = 0;
		return nbuf;
	}

	buff_pool->nbuf_fail_cnt++;

	/* Allocate buffer from the buffer pool */
	if (buff_pool->nbuf_fail_cnt >= DP_RX_BUFFER_POOL_ALLOC_THRES ||
	    (num_available_buffers < dp_rxdma_srng->num_entries / 10)) {
		nbuf = qdf_nbuf_queue_head_dequeue(&buff_pool->emerg_nbuf_q);
		if (nbuf)
			DP_STATS_INC(dp_pdev,
				     rx_buffer_pool.num_pool_bufs_replenish, 1);
	}

	return nbuf;
}

/**
 * dp_rx_buffer_pool_init() - pre-allocate the per-MAC emergency nbuf pool
 * @soc: DP SOC context
 * @mac_id: LMAC id selecting the descriptor/buffer pool pair
 *
 * No-op if the feature is disabled in wlan_cfg, or if this pool was
 * already initialized. Individual allocation failures during fill are
 * tolerated (best effort); the achieved pool depth is logged.
 */
void dp_rx_buffer_pool_init(struct dp_soc *soc, u8 mac_id)
{
	struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
	struct rx_buff_pool *buff_pool = &soc->rx_buff_pool[mac_id];
	qdf_nbuf_t nbuf;
	int i;

	if (!wlan_cfg_is_rx_buffer_pool_enabled(soc->wlan_cfg_ctx)) {
		dp_err("RX buffer pool support is disabled");
		buff_pool->is_initialized = false;
		return;
	}

	if (buff_pool->is_initialized)
		return;

	qdf_nbuf_queue_head_init(&buff_pool->emerg_nbuf_q);

	/* Best-effort fill: skip over any allocation that fails. */
	for (i = 0; i < DP_RX_BUFFER_POOL_SIZE; i++) {
		nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
				      RX_BUFFER_RESERVATION,
				      rx_desc_pool->buf_alignment, FALSE);
		if (!nbuf)
			continue;
		qdf_nbuf_queue_head_enqueue_tail(&buff_pool->emerg_nbuf_q,
						 nbuf);
	}

	dp_info("RX buffer pool required allocation: %u actual allocation: %u",
		DP_RX_BUFFER_POOL_SIZE,
		qdf_nbuf_queue_head_qlen(&buff_pool->emerg_nbuf_q));

	buff_pool->is_initialized = true;
}

/**
 * dp_rx_buffer_pool_deinit() - drain and tear down the emergency nbuf pool
 * @soc: DP SOC context
 * @mac_id: LMAC id selecting the pool
 *
 * Frees every queued buffer and clears is_initialized so subsequent
 * dp_rx_buffer_pool_init() calls can re-create the pool.
 */
void dp_rx_buffer_pool_deinit(struct dp_soc *soc, u8 mac_id)
{
	struct rx_buff_pool *buff_pool = &soc->rx_buff_pool[mac_id];
	qdf_nbuf_t nbuf;

	if (!buff_pool->is_initialized)
		return;

	dp_info("buffers in the RX buffer pool during deinit: %u",
		qdf_nbuf_queue_head_qlen(&buff_pool->emerg_nbuf_q));

	while ((nbuf = qdf_nbuf_queue_head_dequeue(&buff_pool->emerg_nbuf_q)))
		qdf_nbuf_free(nbuf);

	buff_pool->is_initialized = false;
}
#endif /* WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL */