1  /*
2   * Copyright (c) 2011-2020 The Linux Foundation. All rights reserved.
3   * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
4   *
5   * Permission to use, copy, modify, and/or distribute this software for
6   * any purpose with or without fee is hereby granted, provided that the
7   * above copyright notice and this permission notice appear in all
8   * copies.
9   *
10   * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11   * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12   * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13   * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14   * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15   * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16   * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17   * PERFORMANCE OF THIS SOFTWARE.
18   */
19  
/**
 * @file ol_tx.h
 * @brief Internal definitions for the high-level tx module.
 */
24  #ifndef _OL_TX__H_
25  #define _OL_TX__H_
26  
27  #include <qdf_nbuf.h>           /* qdf_nbuf_t */
28  #include <qdf_lock.h>
29  #include <cdp_txrx_cmn.h>       /* ol_txrx_vdev_t, etc. */
30  #include <cdp_txrx_misc.h>      /* ol_tx_spec */
31  #include <cdp_txrx_handle.h>
32  #include <ol_txrx_types.h>      /* ol_tx_desc_t, ol_txrx_msdu_info_t */
33  #include <ol_txrx.h>
34  #include <hif.h>
35  
36  #ifdef IPA_OFFLOAD
/**
 * ol_tx_send_ipa_data_frame() - send IPA data frame
 * @soc_hdl: datapath soc handle
 * @vdev_id: virtual interface id
 * @skb: skb to transmit
 *
 * Return: NULL on success, skb on failure
 */
45  qdf_nbuf_t ol_tx_send_ipa_data_frame(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
46  				     qdf_nbuf_t skb);
47  #endif
48  
49  #ifdef CONFIG_LL_DP_SUPPORT
50  struct ol_tx_desc_t *
51  ol_tx_prepare_ll(ol_txrx_vdev_handle vdev,
52  		 qdf_nbuf_t msdu,
53  		 struct ol_txrx_msdu_info_t *msdu_info);
54  #endif
55  
56  qdf_nbuf_t ol_tx_ll_wrapper(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list);
57  #ifdef WLAN_FEATURE_FASTPATH
58  qdf_nbuf_t ol_tx_ll_fast(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list);
59  
60  void ol_tx_setup_fastpath_ce_handles(struct hif_opaque_softc *osc,
61  				     struct ol_txrx_pdev_t *pdev);
62  #else
static inline void
ol_tx_setup_fastpath_ce_handles(struct hif_opaque_softc *osc,
				struct ol_txrx_pdev_t *pdev)
{
	/* no-op: fastpath CE handles are only used with WLAN_FEATURE_FASTPATH */
}
67  
68  qdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list);
69  #endif
70  
71  qdf_nbuf_t ol_tx_ll_queue(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list);
72  
/*
 * Select the data-path transmit entry point at compile time.
 * OL_TX_LL is defined first because OL_TX_SEND may expand to it
 * (macro expansion is deferred to the point of use, so either order
 * works; this order simply reads top-down).
 */
#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
#define OL_TX_LL ol_tx_ll_queue
#else
#define OL_TX_LL ol_tx_ll_wrapper
#endif

#ifdef CONFIG_HL_SUPPORT
#define OL_TX_SEND ol_tx_hl
#else
#define OL_TX_SEND OL_TX_LL
#endif
84  
85  #ifdef WLAN_SUPPORT_TXRX_HL_BUNDLE
86  void ol_tx_hl_vdev_bundle_timer(void *context);
87  
88  void ol_tx_hl_queue_flush_all(struct ol_txrx_vdev_t *vdev);
89  qdf_nbuf_t
90  ol_tx_hl_pdev_queue_send_all(struct ol_txrx_pdev_t *pdev);
91  #else
/* no-op: HL tx bundling is compiled out without WLAN_SUPPORT_TXRX_HL_BUNDLE */
static inline void ol_tx_hl_vdev_bundle_timer(void *context)
{
}
96  
/* no-op: there are no bundle queues to flush when bundling is disabled */
static inline void ol_tx_hl_queue_flush_all(struct ol_txrx_vdev_t *vdev)
{
}
101  
102  static inline
103  qdf_nbuf_t
ol_tx_hl_pdev_queue_send_all(struct ol_txrx_pdev_t * pdev)104  ol_tx_hl_pdev_queue_send_all(struct ol_txrx_pdev_t *pdev)
105  {
106  	return NULL;
107  }
108  #endif
109  
110  #ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
111  void ol_tx_vdev_ll_pause_queue_send(void *context);
112  void ol_tx_pdev_ll_pause_queue_send_all(struct ol_txrx_pdev_t *pdev);
113  #else
/* no-op: legacy LL tx flow control is compiled out */
static inline void ol_tx_vdev_ll_pause_queue_send(void *context)
{
}
/* no-op: legacy LL tx flow control is compiled out */
static inline void
ol_tx_pdev_ll_pause_queue_send_all(struct ol_txrx_pdev_t *pdev)
{
}
121  #endif
122  
/**
 * ol_tx_comp_hw_to_qdf_status() - map an ol hw tx completion status to the
 *	corresponding qdf status
 * @status: hw status
 *
 * Return: qdf tx rx status
 */
129  static inline enum qdf_dp_tx_rx_status
ol_tx_comp_hw_to_qdf_status(uint16_t status)130  ol_tx_comp_hw_to_qdf_status(uint16_t status)
131  {
132  	switch (status) {
133  	case HTT_TX_COMPL_IND_STAT_OK:
134  		return QDF_TX_RX_STATUS_OK;
135  	case HTT_TX_COMPL_IND_STAT_DISCARD:
136  	case HTT_TX_COMPL_IND_STAT_DROP:
137  		return QDF_TX_RX_STATUS_FW_DISCARD;
138  	case HTT_TX_COMPL_IND_STAT_NO_ACK:
139  		return QDF_TX_RX_STATUS_NO_ACK;
140  	default:
141  		return QDF_TX_RX_STATUS_DEFAULT;
142  	}
143  }
144  
145  static inline
ol_txrx_tx_is_raw(enum ol_tx_spec tx_spec)146  int ol_txrx_tx_is_raw(enum ol_tx_spec tx_spec)
147  {
148  	return	tx_spec &
149  		(OL_TX_SPEC_RAW | OL_TX_SPEC_NO_AGGR | OL_TX_SPEC_NO_ENCRYPT);
150  }
151  
152  static inline
ol_txrx_tx_raw_subtype(enum ol_tx_spec tx_spec)153  uint8_t ol_txrx_tx_raw_subtype(enum ol_tx_spec tx_spec)
154  {
155  	uint8_t sub_type = 0x1; /* 802.11 MAC header present */
156  
157  	if (tx_spec & OL_TX_SPEC_NO_AGGR)
158  		sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_AGGR_S;
159  	if (tx_spec & OL_TX_SPEC_NO_ENCRYPT)
160  		sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_ENCRYPT_S;
161  	if (tx_spec & OL_TX_SPEC_NWIFI_NO_ENCRYPT)
162  		sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_ENCRYPT_S;
163  	return sub_type;
164  }
165  
/**
 * ol_tx_hl() - transmit tx frames for a HL system.
 * @vdev: the virtual device transmitting the data
 * @msdu_list: the tx frames to send
 *
 * Return: NULL if all MSDUs are accepted
 */
173  qdf_nbuf_t
174  ol_tx_hl(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list);
175  
/**
 * ol_tx_non_std() - Allow the control-path SW to send data frames
 * @soc_hdl: Datapath soc handle
 * @vdev_id: id of vdev
 * @tx_spec: what non-standard handling to apply to the tx data frames
 * @msdu_list: NULL-terminated list of tx MSDUs
 *
 * Generally, all tx data frames come from the OS shim into the txrx layer.
 * However, there are rare cases, such as TDLS messaging, where the UMAC
 * control-path SW creates tx data frames.
 * This UMAC SW can call this function to provide the tx data frames to
 * the txrx layer.
 * The UMAC SW can request a callback for these data frames after their
 * transmission completes, by using the ol_txrx_data_tx_cb_set function
 * to register a tx completion callback, and by specifying
 * ol_tx_spec_no_free as the tx_spec arg when giving the frames to
 * ol_tx_non_std.
 * The MSDUs need to have the appropriate L2 header type (802.3 vs. 802.11),
 * as specified by ol_cfg_frame_type().
 *
 * Return: NULL - success, skb - failure
 */
198  #ifdef CONFIG_HL_SUPPORT
199  qdf_nbuf_t ol_tx_non_std_hl(struct ol_txrx_vdev_t *vdev,
200  			    enum ol_tx_spec tx_spec,
201  			    qdf_nbuf_t msdu_list);
202  
203  static inline qdf_nbuf_t
ol_tx_non_std(struct cdp_soc_t * soc_hdl,uint8_t vdev_id,enum ol_tx_spec tx_spec,qdf_nbuf_t msdu_list)204  ol_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
205  	      enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
206  {
207  	struct ol_txrx_vdev_t *vdev;
208  
209  	vdev = (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
210  
211  	if (!vdev)
212  		return msdu_list;
213  	else
214  		return ol_tx_non_std_hl(vdev, tx_spec, msdu_list);
215  }
216  #else
217  qdf_nbuf_t ol_tx_non_std_ll(struct ol_txrx_vdev_t *vdev,
218  			    enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
219  
220  static inline qdf_nbuf_t
ol_tx_non_std(struct cdp_soc_t * soc_hdl,uint8_t vdev_id,enum ol_tx_spec tx_spec,qdf_nbuf_t msdu_list)221  ol_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
222  	      enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
223  {
224  	struct ol_txrx_vdev_t *vdev;
225  
226  	vdev = (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
227  
228  	if (!vdev)
229  		return msdu_list;
230  	else
231  		return ol_tx_non_std_ll(vdev, tx_spec, msdu_list);
232  }
233  #endif
234  
/**
 * ol_tx_trace_pkt() - Trace TX packet at OL layer
 *
 * @skb: skb to be traced
 * @msdu_id: msdu_id of the packet
 * @vdev_id: vdev_id of the packet
 * @op_mode: vdev operation mode
 *
 * Return: None
 */
245  void ol_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id, uint8_t vdev_id,
246  		     enum QDF_OPMODE op_mode);
247  
248  void ol_txrx_mgmt_tx_complete(void *ctxt, qdf_nbuf_t netbuf, int err);
249  
/**
 * ol_txrx_mgmt_tx_cb_set() - Store a callback for delivery
 *	notifications for management frames.
 * @soc: Datapath soc handle
 * @pdev_id: Physical device instance id
 * @type: the type of mgmt frame the callback is used for
 * @download_cb: the callback for notification of delivery to the target
 * @ota_ack_cb: the callback for notification of delivery to the peer
 * @ctxt: context to use with the callback
 *
 * When the txrx SW receives notifications from the target that a tx frame
 * has been delivered to its recipient, it will check if the tx frame
 * is a management frame.  If so, the txrx SW will check the management
 * frame type specified when the frame was submitted for transmission.
 * If there is a callback function registered for the type of management
 * frame in question, the txrx code will invoke the callback to inform
 * the management + control SW that the mgmt frame was delivered.
 * This function is used by the control SW to store a callback pointer
 * for a given type of management frame.
 *
 * Return: QDF_STATUS
 */
270  QDF_STATUS
271  ol_txrx_mgmt_tx_cb_set(struct cdp_soc_t *soc, uint8_t pdev_id, uint8_t type,
272  		       ol_txrx_mgmt_tx_cb download_cb,
273  		       ol_txrx_mgmt_tx_cb ota_ack_cb, void *ctxt);
274  
/**
 * ol_txrx_mgmt_send_ext() - Transmit a management frame
 * @soc: Datapath soc handle
 * @vdev_id: virtual interface id
 * @tx_mgmt_frm: management frame to transmit
 * @type: the type of management frame (determines what callback to use)
 * @use_6mbps: specify whether management frame to transmit should
 *	use 6 Mbps rather than 1 Mbps min rate (for 5GHz band or P2P)
 * @chanfreq: channel to transmit the frame on
 *
 * Send the specified management frame from the specified virtual device.
 * The type is used for determining whether to invoke a callback to inform
 * the sender that the tx mgmt frame was delivered, and if so, which
 * callback to use.
 *
 * Return: 0 - the frame is accepted for transmission
 *         1 - the frame was not accepted
 */
293  int
294  ol_txrx_mgmt_send_ext(struct cdp_soc_t *soc, uint8_t vdev_id,
295  		      qdf_nbuf_t tx_mgmt_frm,
296  		      uint8_t type, uint8_t use_6mbps, uint16_t chanfreq);
297  
298  qdf_nbuf_t
299  ol_tx_reinject(struct ol_txrx_vdev_t *vdev, qdf_nbuf_t msdu, uint16_t peer_id);
300  
301  #if defined(FEATURE_TSO)
302  void ol_tso_seg_list_init(struct ol_txrx_pdev_t *pdev, uint32_t num_seg);
303  void ol_tso_seg_list_deinit(struct ol_txrx_pdev_t *pdev);
304  void ol_tso_num_seg_list_init(struct ol_txrx_pdev_t *pdev, uint32_t num_seg);
305  void ol_tso_num_seg_list_deinit(struct ol_txrx_pdev_t *pdev);
306  uint32_t ol_tx_tso_get_stats_idx(struct ol_txrx_pdev_t *pdev);
307  uint8_t ol_tx_prepare_tso(ol_txrx_vdev_handle vdev,
308  			  qdf_nbuf_t msdu,
309  			  struct ol_txrx_msdu_info_t *msdu_info);
310  void ol_tx_tso_update_stats(struct ol_txrx_pdev_t *pdev,
311  			    struct qdf_tso_info_t  *tso_info, qdf_nbuf_t msdu,
312  			    uint32_t tso_msdu_idx);
313  #else
static inline uint32_t ol_tx_tso_get_stats_idx(struct ol_txrx_pdev_t *pdev)
{
	/* TSO stats are not tracked when FEATURE_TSO is disabled */
	return 0;
}
318  
/* no-op: the TSO segment pool exists only when FEATURE_TSO is enabled */
static inline void ol_tso_seg_list_init(struct ol_txrx_pdev_t *pdev,
					uint32_t num_seg)
{
}
323  
/* no-op: the TSO segment pool exists only when FEATURE_TSO is enabled */
static inline void ol_tso_seg_list_deinit(struct ol_txrx_pdev_t *pdev)
{
}
327  
/* no-op: the TSO num-seg pool exists only when FEATURE_TSO is enabled */
static inline void ol_tso_num_seg_list_init(struct ol_txrx_pdev_t *pdev,
					    uint32_t num_seg)
{
}
332  
/* no-op: the TSO num-seg pool exists only when FEATURE_TSO is enabled */
static inline void ol_tso_num_seg_list_deinit(struct ol_txrx_pdev_t *pdev)
{
}
336  
static inline uint8_t ol_tx_prepare_tso(ol_txrx_vdev_handle vdev,
					qdf_nbuf_t msdu,
					struct ol_txrx_msdu_info_t *msdu_info)
{
	/* no TSO segments to prepare when FEATURE_TSO is disabled */
	return 0;
}
343  
static inline void
ol_tx_tso_update_stats(struct ol_txrx_pdev_t *pdev,
		       struct qdf_tso_info_t *tso_info, qdf_nbuf_t msdu,
		       uint32_t tso_msdu_idx)
{
	/* no TSO stats to update when FEATURE_TSO is disabled */
}
350  #endif
351  
352  #ifdef QCA_HL_NETDEV_FLOW_CONTROL
353  bool ol_tx_desc_is_high_prio(qdf_nbuf_t msdu);
354  #endif
355  
356  #if defined(HELIUMPLUS)
357  void ol_txrx_dump_frag_desc(char *msg, struct ol_tx_desc_t *tx_desc);
358  #else
static inline void
ol_txrx_dump_frag_desc(char *msg, struct ol_tx_desc_t *tx_desc)
{
	/* no-op: fragment descriptors are only dumped on HELIUMPLUS targets */
}
363  #endif
364  
365  #endif /* _OL_TX__H_ */
366