xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.h (revision d0c05845839e5f2ba5a8dcebe0cd3e4cd4e8dfcf)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 #ifndef __DP_TX_H
20 #define __DP_TX_H
21 
22 #include <qdf_types.h>
23 #include <qdf_nbuf.h>
24 #include "dp_types.h"
25 #ifdef FEATURE_PERPKT_INFO
26 #if defined(QCA_SUPPORT_LATENCY_CAPTURE) || \
27 	defined(QCA_TX_CAPTURE_SUPPORT) || \
28 	defined(QCA_MCOPY_SUPPORT)
29 #include "if_meta_hdr.h"
30 #endif
31 #endif
32 #include "dp_internal.h"
33 #include "hal_tx.h"
34 #include <qdf_tracepoint.h>
35 #ifdef CONFIG_SAWF
36 #include "dp_sawf.h"
37 #endif
38 #include <qdf_pkt_add_timestamp.h>
39 
40 #define DP_INVALID_VDEV_ID 0xFF
41 
42 #define DP_TX_MAX_NUM_FRAGS 6
43 
44 /*
45  * DP_TX_DESC_FLAG_FRAG flags should always be defined to 0x1
46  * please do not change this flag's definition
47  */
48 #define DP_TX_DESC_FLAG_FRAG		0x1
49 #define DP_TX_DESC_FLAG_TO_FW		0x2
50 #define DP_TX_DESC_FLAG_SIMPLE		0x4
51 #define DP_TX_DESC_FLAG_RAW		0x8
52 #define DP_TX_DESC_FLAG_MESH		0x10
53 #define DP_TX_DESC_FLAG_QUEUED_TX	0x20
54 #define DP_TX_DESC_FLAG_COMPLETED_TX	0x40
55 #define DP_TX_DESC_FLAG_ME		0x80
56 #define DP_TX_DESC_FLAG_TDLS_FRAME	0x100
57 #define DP_TX_DESC_FLAG_ALLOCATED	0x200
58 #define DP_TX_DESC_FLAG_MESH_MODE	0x400
59 #define DP_TX_DESC_FLAG_UNMAP_DONE	0x800
60 #define DP_TX_DESC_FLAG_TX_COMP_ERR	0x1000
61 #define DP_TX_DESC_FLAG_FLUSH		0x2000
62 
63 #define DP_TX_EXT_DESC_FLAG_METADATA_VALID 0x1
64 
65 #define DP_TX_FREE_SINGLE_BUF(soc, buf)                  \
66 do {                                                           \
67 	qdf_nbuf_unmap(soc->osdev, buf, QDF_DMA_TO_DEVICE);  \
68 	qdf_nbuf_free(buf);                                    \
69 } while (0)
70 
71 #define OCB_HEADER_VERSION	 1
72 
73 #ifdef TX_PER_PDEV_DESC_POOL
74 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
75 #define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
76 #else /* QCA_LL_TX_FLOW_CONTROL_V2 */
77 #define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->pdev->pdev_id)
78 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
79 	#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
80 #else
81 	#ifdef TX_PER_VDEV_DESC_POOL
82 		#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
83 		#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
84 	#endif /* TX_PER_VDEV_DESC_POOL */
85 #endif /* TX_PER_PDEV_DESC_POOL */
86 #define DP_TX_QUEUE_MASK 0x3
87 
88 #define MAX_CDP_SEC_TYPE 12
89 
90 /* number of dwords for htt_tx_msdu_desc_ext2_t */
91 #define DP_TX_MSDU_INFO_META_DATA_DWORDS 7
92 
93 #define dp_tx_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_TX, params)
94 #define dp_tx_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_TX, params)
95 #define dp_tx_err_rl(params...) QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP_TX, params)
96 #define dp_tx_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_TX, params)
97 #define dp_tx_info(params...) \
98 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_TX, ## params)
99 #define dp_tx_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_TX, params)
100 
101 #define dp_tx_comp_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_TX_COMP, params)
102 #define dp_tx_comp_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_TX_COMP, params)
103 #define dp_tx_comp_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_TX_COMP, params)
104 #define dp_tx_comp_info(params...) \
105 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_TX_COMP, ## params)
106 #define dp_tx_comp_info_rl(params...) \
107 	__QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_TX_COMP, ## params)
108 #define dp_tx_comp_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_TX_COMP, params)
109 
110 #ifndef QCA_HOST_MODE_WIFI_DISABLED
111 
/**
 * struct dp_tx_frag_info_s - TX buffer fragment descriptor
 * @vaddr: hlos virtual address for buffer
 * @paddr_lo: physical address lower 32bits
 * @paddr_hi: physical address higher bits
 * @len: length of the buffer
 */
struct dp_tx_frag_info_s {
	uint8_t  *vaddr;
	uint32_t paddr_lo;
	uint16_t paddr_hi;
	uint16_t len;
};
125 
/**
 * struct dp_tx_seg_info_s - Segmentation Descriptor
 * @nbuf: NBUF pointer if segment corresponds to separate nbuf
 * @frag_cnt: Fragment count in this segment (at most DP_TX_MAX_NUM_FRAGS)
 * @total_len: Total length of segment
 * @frags: per-Fragment information
 * @next: pointer to next MSDU segment (singly-linked list)
 */
struct dp_tx_seg_info_s  {
	qdf_nbuf_t nbuf;
	uint16_t frag_cnt;
	uint16_t total_len;
	struct dp_tx_frag_info_s frags[DP_TX_MAX_NUM_FRAGS];
	struct dp_tx_seg_info_s *next;
};
141 
142 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
143 
144 /**
145  * struct dp_tx_sg_info_s - Scatter Gather Descriptor
146  * @num_segs: Number of segments (TSO/ME) in the frame
147  * @total_len: Total length of the frame
148  * @curr_seg: Points to current segment descriptor to be processed. Chain of
149  * 	      descriptors for SG frames/multicast-unicast converted packets.
150  *
151  * Used for SG (802.3 or Raw) frames and Multicast-Unicast converted frames to
152  * carry fragmentation information
153  * Raw Frames will be handed over to driver as an SKB chain with MPDU boundaries
154  * indicated through flags in SKB CB (first_msdu and last_msdu). This will be
155  * converted into set of skb sg (nr_frags) structures.
156  */
157 struct dp_tx_sg_info_s {
158 	uint32_t num_segs;
159 	uint32_t total_len;
160 	struct dp_tx_seg_info_s *curr_seg;
161 };
162 
/**
 * struct dp_tx_queue - Tx queue
 * @desc_pool_id: Descriptor Pool to be used for the tx queue
 * @ring_id: TCL descriptor ring ID corresponding to the tx queue
 *
 * Tx queue contains information of the software (Descriptor pool)
 * and hardware resources (TCL ring id) to be used for a particular
 * transmit queue (obtained from skb_queue_mapping in case of linux)
 */
struct dp_tx_queue {
	uint8_t desc_pool_id;
	uint8_t ring_id;
};
176 
/**
 * struct dp_tx_msdu_info_s - MSDU Descriptor
 * @frm_type: Frame type - Regular/TSO/SG/Multicast enhancement
 * @tx_queue: Tx queue on which this MSDU should be transmitted
 * @num_seg: Number of segments (TSO)
 * @tid: TID (override) that is sent from HLOS
 * @exception_fw: Duplicate frame to be sent to firmware
 * @is_tx_sniffer: Indicates if the packet has to be sniffed
 * @u.tso_info: TSO information for TSO frame types
 *	     (chain of the TSO segments, number of segments)
 * @u.sg_info: Scatter Gather information for non-TSO SG frames
 * @meta_data: Mesh meta header information
 * @ppdu_cookie: 16-bit ppdu_cookie that has to be replayed back in completions
 * @gsn: global sequence for reinjected mcast packets
 * @vdev_id: vdev_id for reinjected mcast packets
 * @skip_hp_update: Skip HP update for TSO segments and update in last segment
 *
 * This structure holds the complete MSDU information needed to program the
 * Hardware TCL and MSDU extension descriptors for different frame types
 *
 */
struct dp_tx_msdu_info_s {
	enum dp_tx_frm_type frm_type;
	struct dp_tx_queue tx_queue;
	uint32_t num_seg;
	uint8_t tid;
	uint8_t exception_fw;
	uint8_t is_tx_sniffer;
	union {
		struct qdf_tso_info_t tso_info;
		struct dp_tx_sg_info_s sg_info;
	} u;
	uint32_t meta_data[DP_TX_MSDU_INFO_META_DATA_DWORDS];
	uint16_t ppdu_cookie;
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
#ifdef WLAN_MCAST_MLO
	uint16_t gsn;
	uint8_t vdev_id;
#endif
#endif
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
	uint8_t skip_hp_update;
#endif
};
221 
222 #ifndef QCA_HOST_MODE_WIFI_DISABLED
223 /**
224  * dp_tx_deinit_pair_by_index() - Deinit TX rings based on index
225  * @soc: core txrx context
226  * @index: index of ring to deinit
227  *
228  * Deinit 1 TCL and 1 WBM2SW release ring on as needed basis using
229  * index of the respective TCL/WBM2SW release in soc structure.
230  * For example, if the index is 2 then &soc->tcl_data_ring[2]
231  * and &soc->tx_comp_ring[2] will be deinitialized.
232  *
233  * Return: none
234  */
235 void dp_tx_deinit_pair_by_index(struct dp_soc *soc, int index);
236 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
237 
238 void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool);
239 void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool);
240 void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool);
241 void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool);
242 QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc,
243 					 uint8_t num_pool,
244 					 uint32_t num_desc);
245 QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc,
246 					uint8_t num_pool,
247 					uint32_t num_desc);
248 void dp_tx_comp_free_buf(struct dp_soc *soc, struct dp_tx_desc_s *desc);
249 void dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id);
250 void dp_tx_compute_delay(struct dp_vdev *vdev, struct dp_tx_desc_s *tx_desc,
251 			 uint8_t tid, uint8_t ring_id);
252 void dp_tx_comp_process_tx_status(struct dp_soc *soc,
253 				  struct dp_tx_desc_s *tx_desc,
254 				  struct hal_tx_completion_status *ts,
255 				  struct dp_txrx_peer *txrx_peer,
256 				  uint8_t ring_id);
257 void dp_tx_comp_process_desc(struct dp_soc *soc,
258 			     struct dp_tx_desc_s *desc,
259 			     struct hal_tx_completion_status *ts,
260 			     struct dp_txrx_peer *txrx_peer);
261 void dp_tx_reinject_handler(struct dp_soc *soc,
262 			    struct dp_vdev *vdev,
263 			    struct dp_tx_desc_s *tx_desc,
264 			    uint8_t *status,
265 			    uint8_t reinject_reason);
266 void dp_tx_inspect_handler(struct dp_soc *soc,
267 			   struct dp_vdev *vdev,
268 			   struct dp_tx_desc_s *tx_desc,
269 			   uint8_t *status);
270 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
271 				   uint32_t length, uint8_t tx_status,
272 				   bool update);
273 
274 #ifndef QCA_HOST_MODE_WIFI_DISABLED
/**
 * dp_tso_soc_attach() - TSO Attach handler
 * @txrx_soc: Opaque Dp handle
 *
 * Reserve TSO descriptor buffers
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 * QDF_STATUS_SUCCESS on success
 */
284 QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc);
285 
/**
 * dp_tso_soc_detach() - TSO Detach handler
 * @txrx_soc: Opaque Dp handle
 *
 * Deallocate TSO descriptor buffers
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 * QDF_STATUS_SUCCESS on success
 */
295 QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc);
296 
297 qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t nbuf);
298 
299 qdf_nbuf_t dp_tx_send_vdev_id_check(struct cdp_soc_t *soc, uint8_t vdev_id,
300 				    qdf_nbuf_t nbuf);
301 
302 qdf_nbuf_t dp_tx_send_exception(struct cdp_soc_t *soc, uint8_t vdev_id,
303 				qdf_nbuf_t nbuf,
304 				struct cdp_tx_exception_metadata *tx_exc);
305 
306 qdf_nbuf_t dp_tx_send_exception_vdev_id_check(struct cdp_soc_t *soc,
307 					      uint8_t vdev_id,
308 					      qdf_nbuf_t nbuf,
309 				struct cdp_tx_exception_metadata *tx_exc);
310 
311 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id,
312 			   qdf_nbuf_t nbuf);
313 qdf_nbuf_t
314 dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
315 		       struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
316 		       struct cdp_tx_exception_metadata *tx_exc_metadata);
317 
318 #if QDF_LOCK_STATS
319 noinline qdf_nbuf_t
320 dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
321 			 struct dp_tx_msdu_info_s *msdu_info);
322 #else
323 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
324 				    struct dp_tx_msdu_info_s *msdu_info);
325 #endif
326 #ifdef FEATURE_WLAN_TDLS
327 /**
328  * dp_tx_non_std() - Allow the control-path SW to send data frames
329  * @soc_hdl: Datapath soc handle
330  * @vdev_id: id of vdev
331  * @tx_spec: what non-standard handling to apply to the tx data frames
332  * @msdu_list: NULL-terminated list of tx MSDUs
333  *
334  * Return: NULL on success,
335  *         nbuf when it fails to send
336  */
337 qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
338 			 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
339 #endif
340 int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac);
341 
342 /**
343  * dp_tx_comp_handler() - Tx completion handler
344  * @int_ctx: pointer to DP interrupt context
345  * @soc: core txrx main context
346  * @hal_srng: Opaque HAL SRNG pointer
347  * @ring_id: completion ring id
348  * @quota: No. of packets/descriptors that can be serviced in one loop
349  *
350  * This function will collect hardware release ring element contents and
351  * handle descriptor contents. Based on contents, free packet or handle error
352  * conditions
353  *
354  * Return: Number of TX completions processed
355  */
356 uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
357 			    hal_ring_handle_t hal_srng, uint8_t ring_id,
358 			    uint32_t quota);
359 
360 QDF_STATUS
361 dp_tx_prepare_send_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
362 
363 QDF_STATUS
364 dp_tx_prepare_send_igmp_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
365 
366 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
367 
368 #if defined(QCA_HOST_MODE_WIFI_DISABLED) || !defined(ATH_SUPPORT_IQUE)
/* Stub: multicast-enhancement teardown is a no-op when host mode is
 * disabled or IQUE support is not compiled in.
 */
static inline void dp_tx_me_exit(struct dp_pdev *pdev)
{
	return;
}
373 #endif
374 
375 /**
376  * dp_tx_pdev_init() - dp tx pdev init
377  * @pdev: physical device instance
378  *
379  * Return: QDF_STATUS_SUCCESS: success
380  *         QDF_STATUS_E_RESOURCES: Error return
381  */
382 static inline QDF_STATUS dp_tx_pdev_init(struct dp_pdev *pdev)
383 {
384 	struct dp_soc *soc = pdev->soc;
385 
386 	/* Initialize Flow control counters */
387 	qdf_atomic_init(&pdev->num_tx_outstanding);
388 	pdev->tx_descs_max = 0;
389 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
390 		/* Initialize descriptors in TCL Ring */
391 		hal_tx_init_data_ring(soc->hal_soc,
392 				soc->tcl_data_ring[pdev->pdev_id].hal_srng);
393 	}
394 
395 	return QDF_STATUS_SUCCESS;
396 }
397 
398 /**
399  * dp_tx_prefetch_hw_sw_nbuf_desc() - function to prefetch HW and SW desc
400  * @soc: Handle to HAL Soc structure
401  * @hal_soc: HAL SOC handle
402  * @num_avail_for_reap: descriptors available for reap
403  * @hal_ring_hdl: ring pointer
404  * @last_prefetched_hw_desc: pointer to the last prefetched HW descriptor
405  * @last_prefetched_sw_desc: pointer to last prefetch SW desc
406  *
407  * Return: None
408  */
409 #ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH
410 static inline
411 void dp_tx_prefetch_hw_sw_nbuf_desc(struct dp_soc *soc,
412 				    hal_soc_handle_t hal_soc,
413 				    uint32_t num_avail_for_reap,
414 				    hal_ring_handle_t hal_ring_hdl,
415 				    void **last_prefetched_hw_desc,
416 				    struct dp_tx_desc_s
417 				    **last_prefetched_sw_desc)
418 {
419 	if (*last_prefetched_sw_desc) {
420 		qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf);
421 		qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf + 64);
422 	}
423 
424 	if (num_avail_for_reap && *last_prefetched_hw_desc) {
425 		dp_tx_comp_get_prefetched_params_from_hal_desc(
426 						soc,
427 						*last_prefetched_hw_desc,
428 						last_prefetched_sw_desc);
429 		*last_prefetched_hw_desc =
430 			hal_srng_dst_prefetch_next_cached_desc(
431 					hal_soc,
432 					hal_ring_hdl,
433 					(uint8_t *)*last_prefetched_hw_desc);
434 	}
435 }
436 #else
static inline
void dp_tx_prefetch_hw_sw_nbuf_desc(struct dp_soc *soc,
				    hal_soc_handle_t hal_soc,
				    uint32_t num_avail_for_reap,
				    hal_ring_handle_t hal_ring_hdl,
				    void **last_prefetched_hw_desc,
				    struct dp_tx_desc_s
				    **last_prefetched_sw_desc)
{
	/* No-op when HW/SW descriptor prefetch support is compiled out */
}
447 #endif
448 
449 #ifndef FEATURE_WDS
/* Stub: MEC (multicast echo check) handling is only required with WDS */
static inline void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
{
	return;
}
454 #endif
455 
456 #ifndef QCA_MULTIPASS_SUPPORT
/* Stub: without multipass support every frame passes the multipass check */
static inline
bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
			     qdf_nbuf_t nbuf,
			     struct dp_tx_msdu_info_s *msdu_info)
{
	return true;
}
464 
/* Stub: no per-vdev multipass state exists when support is compiled out */
static inline
void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev)
{
}
469 
470 #else
471 bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
472 			     qdf_nbuf_t nbuf,
473 			     struct dp_tx_msdu_info_s *msdu_info);
474 
475 void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev);
476 void dp_tx_remove_vlan_tag(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
477 void dp_tx_add_groupkey_metadata(struct dp_vdev *vdev,
478 				 struct dp_tx_msdu_info_s *msdu_info,
479 				 uint16_t group_key);
480 #endif
481 
482 /**
483  * dp_tx_hw_to_qdf()- convert hw status to qdf status
484  * @status: hw status
485  *
486  * Return: qdf tx rx status
487  */
488 static inline enum qdf_dp_tx_rx_status dp_tx_hw_to_qdf(uint16_t status)
489 {
490 	switch (status) {
491 	case HAL_TX_TQM_RR_FRAME_ACKED:
492 		return QDF_TX_RX_STATUS_OK;
493 	case HAL_TX_TQM_RR_REM_CMD_TX:
494 		return QDF_TX_RX_STATUS_NO_ACK;
495 	case HAL_TX_TQM_RR_REM_CMD_REM:
496 	case HAL_TX_TQM_RR_REM_CMD_NOTX:
497 	case HAL_TX_TQM_RR_REM_CMD_AGED:
498 		return QDF_TX_RX_STATUS_FW_DISCARD;
499 	default:
500 		return QDF_TX_RX_STATUS_DEFAULT;
501 	}
502 }
503 
504 #ifndef QCA_HOST_MODE_WIFI_DISABLED
505 /**
506  * dp_tx_get_queue() - Returns Tx queue IDs to be used for this Tx frame
507  * @vdev: DP Virtual device handle
508  * @nbuf: Buffer pointer
509  * @queue: queue ids container for nbuf
510  *
511  * TX packet queue has 2 instances, software descriptors id and dma ring id
512  * Based on tx feature and hardware configuration queue id combination could be
513  * different.
514  * For example -
515  * With XPS enabled,all TX descriptor pools and dma ring are assigned per cpu id
516  * With no XPS,lock based resource protection, Descriptor pool ids are different
517  * for each vdev, dma ring id will be same as single pdev id
518  *
519  * Return: None
520  */
521 #ifdef QCA_OL_TX_MULTIQ_SUPPORT
522 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
523 				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
524 {
525 	uint16_t queue_offset = qdf_nbuf_get_queue_mapping(nbuf) &
526 				DP_TX_QUEUE_MASK;
527 
528 	queue->desc_pool_id = queue_offset;
529 	queue->ring_id = qdf_nbuf_get_queue_mapping(nbuf);
530 }
531 
532 /*
533  * dp_tx_get_hal_ring_hdl()- Get the hal_tx_ring_hdl for data transmission
534  * @dp_soc - DP soc structure pointer
535  * @ring_id - Transmit Queue/ring_id to be used when XPS is enabled
536  *
537  * Return - HAL ring handle
538  */
539 static inline hal_ring_handle_t dp_tx_get_hal_ring_hdl(struct dp_soc *soc,
540 						       uint8_t ring_id)
541 {
542 	if (ring_id == soc->num_tcl_data_rings)
543 		return soc->tcl_cmd_credit_ring.hal_srng;
544 
545 	return soc->tcl_data_ring[ring_id].hal_srng;
546 }
547 
548 #else /* QCA_OL_TX_MULTIQ_SUPPORT */
549 
550 #ifdef TX_MULTI_TCL
551 #ifdef IPA_OFFLOAD
552 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
553 				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
554 {
555 	/* get flow id */
556 	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
557 	if (vdev->pdev->soc->wlan_cfg_ctx->ipa_enabled)
558 		queue->ring_id = DP_TX_GET_RING_ID(vdev);
559 	else
560 		queue->ring_id = (qdf_nbuf_get_queue_mapping(nbuf) %
561 					vdev->pdev->soc->num_tcl_data_rings);
562 }
563 #else
564 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
565 				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
566 {
567 	/* get flow id */
568 	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
569 	queue->ring_id = (qdf_nbuf_get_queue_mapping(nbuf) %
570 				vdev->pdev->soc->num_tcl_data_rings);
571 }
572 #endif
573 #else
static inline void dp_tx_get_queue(struct dp_vdev *vdev,
				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
{
	/* get flow id */
	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
	/* Single-TCL case: ring id is fixed by the vdev/pdev mapping */
	queue->ring_id = DP_TX_GET_RING_ID(vdev);
}
581 #endif
582 
/* Without XPS multi-queue support, ring_id indexes the TCL data rings
 * directly.
 */
static inline hal_ring_handle_t dp_tx_get_hal_ring_hdl(struct dp_soc *soc,
						       uint8_t ring_id)
{
	return soc->tcl_data_ring[ring_id].hal_srng;
}
588 #endif
589 
590 #ifdef QCA_OL_TX_LOCK_LESS_ACCESS
/**
 * dp_tx_hal_ring_access_start() - hal_tx_ring access for data transmission
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * Lock-less variant: the SRNG is accessed without taking its lock.
 *
 * Return: status from hal_srng_access_start_unlocked()
 */
static inline int dp_tx_hal_ring_access_start(struct dp_soc *soc,
					      hal_ring_handle_t hal_ring_hdl)
{
	return hal_srng_access_start_unlocked(soc->hal_soc, hal_ring_hdl);
}
603 
/**
 * dp_tx_hal_ring_access_end() - hal_tx_ring access for data transmission
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * Lock-less variant: ends the SRNG access without lock handling.
 *
 * Return: None
 */
static inline void dp_tx_hal_ring_access_end(struct dp_soc *soc,
					     hal_ring_handle_t hal_ring_hdl)
{
	hal_srng_access_end_unlocked(soc->hal_soc, hal_ring_hdl);
}
616 
/**
 * dp_tx_hal_ring_access_end_reap() - hal_tx_ring access for data transmission
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * No-op in the lock-less configuration.
 *
 * Return: None
 */
static inline void dp_tx_hal_ring_access_end_reap(struct dp_soc *soc,
						  hal_ring_handle_t
						  hal_ring_hdl)
{
}
629 
630 #else
/* Locked variant: start SRNG access under the ring lock */
static inline int dp_tx_hal_ring_access_start(struct dp_soc *soc,
					      hal_ring_handle_t hal_ring_hdl)
{
	return hal_srng_access_start(soc->hal_soc, hal_ring_hdl);
}
636 
/* Locked variant: end SRNG access and release the ring lock */
static inline void dp_tx_hal_ring_access_end(struct dp_soc *soc,
					     hal_ring_handle_t hal_ring_hdl)
{
	hal_srng_access_end(soc->hal_soc, hal_ring_hdl);
}
642 
/* Locked variant: end SRNG access in reap mode (no HW pointer update) */
static inline void dp_tx_hal_ring_access_end_reap(struct dp_soc *soc,
						  hal_ring_handle_t
						  hal_ring_hdl)
{
	hal_srng_access_end_reap(soc->hal_soc, hal_ring_hdl);
}
649 #endif
650 
651 #ifdef ATH_TX_PRI_OVERRIDE
652 #define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf) \
653 	((_msdu_info)->tid = qdf_nbuf_get_priority(_nbuf))
654 #else
655 #define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf)
656 #endif
657 
658 /* TODO TX_FEATURE_NOT_YET */
/* Placeholder: exception-frame completion processing not implemented yet */
static inline void dp_tx_comp_process_exception(struct dp_tx_desc_s *tx_desc)
{
	return;
}
663 /* TODO TX_FEATURE_NOT_YET */
664 
665 void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
666 		      bool force_free);
667 QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev);
668 QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev);
669 void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev);
670 QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc);
671 QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc);
672 void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc);
673 void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc);
674 void
675 dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
676 			     uint32_t buf_type);
677 #else /* QCA_HOST_MODE_WIFI_DISABLED */
678 
/* Stub: no TX SW descriptor pools to allocate when host mode is disabled */
static inline
QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
684 
/* Stub: no TX SW descriptor pools to initialize when host mode is disabled */
static inline
QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
690 
/* Stub: nothing to free when host mode is disabled */
static inline void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
{
}
694 
/* Stub: nothing to deinitialize when host mode is disabled */
static inline void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
{
}
698 
/* Stub: no descriptors to flush when host mode is disabled */
static inline
void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
		      bool force_free)
{
}
704 
/* Stub: vdev TX attach always succeeds when host mode is disabled */
static inline QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
{
	return QDF_STATUS_SUCCESS;
}
709 
/* Stub: vdev TX detach always succeeds when host mode is disabled */
static inline QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
{
	return QDF_STATUS_SUCCESS;
}
714 
/* Stub: no AST search flags to update when host mode is disabled */
static inline void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
{
}
718 
719 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
720 
721 #if defined(QCA_SUPPORT_LATENCY_CAPTURE) || \
722 	defined(QCA_TX_CAPTURE_SUPPORT) || \
723 	defined(QCA_MCOPY_SUPPORT)
724 #ifdef FEATURE_PERPKT_INFO
725 QDF_STATUS
726 dp_get_completion_indication_for_stack(struct dp_soc *soc,
727 				       struct dp_pdev *pdev,
728 				       struct dp_txrx_peer *peer,
729 				       struct hal_tx_completion_status *ts,
730 				       qdf_nbuf_t netbuf,
731 				       uint64_t time_latency);
732 
733 void dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
734 			    uint16_t peer_id, uint32_t ppdu_id,
735 			    qdf_nbuf_t netbuf);
736 #endif
737 #else
/* Stub: completion indication to stack is unsupported without latency
 * capture / TX capture / mcopy support.
 */
static inline
QDF_STATUS dp_get_completion_indication_for_stack(struct dp_soc *soc,
				       struct dp_pdev *pdev,
				       struct dp_txrx_peer *peer,
				       struct hal_tx_completion_status *ts,
				       qdf_nbuf_t netbuf,
				       uint64_t time_latency)
{
	return QDF_STATUS_E_NOSUPPORT;
}
748 
/* Stub: nothing to deliver to the stack in this configuration */
static inline
void dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
			    uint16_t peer_id, uint32_t ppdu_id,
			    qdf_nbuf_t netbuf)
{
}
755 #endif
756 
757 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
758 void dp_send_completion_to_pkt_capture(struct dp_soc *soc,
759 				       struct dp_tx_desc_s *desc,
760 				       struct hal_tx_completion_status *ts);
761 #else
static inline void
dp_send_completion_to_pkt_capture(struct dp_soc *soc,
				  struct dp_tx_desc_s *desc,
				  struct hal_tx_completion_status *ts)
{
	/* No-op: packet capture v2 is not compiled in */
}
768 #endif
769 
770 #ifndef QCA_HOST_MODE_WIFI_DISABLED
771 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
772 /**
773  * dp_tx_update_stats() - Update soc level tx stats
774  * @soc: DP soc handle
775  * @tx_desc: TX descriptor reference
776  * @ring_id: TCL ring id
777  *
778  * Returns: none
779  */
780 void dp_tx_update_stats(struct dp_soc *soc,
781 			struct dp_tx_desc_s *tx_desc,
782 			uint8_t ring_id);
783 
784 /**
785  * dp_tx_attempt_coalescing() - Check and attempt TCL register write coalescing
786  * @soc: Datapath soc handle
787  * @tx_desc: tx packet descriptor
788  * @tid: TID for pkt transmission
789  * @msdu_info: MSDU info of tx packet
790  * @ring_id: TCL ring id
791  *
792  * Returns: 1, if coalescing is to be done
793  *	    0, if coalescing is not to be done
794  */
795 int
796 dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
797 			 struct dp_tx_desc_s *tx_desc,
798 			 uint8_t tid,
799 			 struct dp_tx_msdu_info_s *msdu_info,
800 			 uint8_t ring_id);
801 
802 /**
803  * dp_tx_ring_access_end() - HAL ring access end for data transmission
804  * @soc: Datapath soc handle
805  * @hal_ring_hdl: HAL ring handle
806  * @coalesce: Coalesce the current write or not
807  *
808  * Returns: none
809  */
810 void
811 dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
812 		      int coalesce);
813 #else
/**
 * dp_tx_update_stats() - Update soc level tx stats
 * @soc: DP soc handle
 * @tx_desc: TX descriptor reference
 * @ring_id: TCL ring id
 *
 * No-op when the SW latency manager feature is not compiled in.
 *
 * Return: none
 */
static inline void dp_tx_update_stats(struct dp_soc *soc,
				      struct dp_tx_desc_s *tx_desc,
				      uint8_t ring_id)
{
}
825 
static inline void
dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
		      int coalesce)
{
	/* Without the SW latency mgr there is nothing to coalesce; end the
	 * ring access immediately.
	 */
	dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
}
832 
/* Stub: TCL write coalescing is never attempted without SW latency mgr */
static inline int
dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
			 struct dp_tx_desc_s *tx_desc,
			 uint8_t tid,
			 struct dp_tx_msdu_info_s *msdu_info,
			 uint8_t ring_id)
{
	return 0;
}
842 
843 #endif /* WLAN_DP_FEATURE_SW_LATENCY_MGR */
844 
845 #ifdef FEATURE_RUNTIME_PM
846 /**
847  * dp_set_rtpm_tput_policy_requirement() - Update RTPM throughput policy
848  * @soc_hdl: DP soc handle
849  * @is_high_tput: flag to indicate whether throughput is high
850  *
851  * Returns: none
852  */
853 static inline
854 void dp_set_rtpm_tput_policy_requirement(struct cdp_soc_t *soc_hdl,
855 					 bool is_high_tput)
856 {
857 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
858 
859 	qdf_atomic_set(&soc->rtpm_high_tput_flag, is_high_tput);
860 }
861 
862 void
863 dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
864 			      hal_ring_handle_t hal_ring_hdl,
865 			      int coalesce);
866 #else
867 #ifdef DP_POWER_SAVE
868 void
869 dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
870 			      hal_ring_handle_t hal_ring_hdl,
871 			      int coalesce);
872 #else
static inline void
dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
			      hal_ring_handle_t hal_ring_hdl,
			      int coalesce)
{
	/* No runtime-PM/power-save handling needed; end access directly */
	dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
}
880 #endif
881 
/* Stub: RTPM throughput policy does not apply without FEATURE_RUNTIME_PM */
static inline void
dp_set_rtpm_tput_policy_requirement(struct cdp_soc_t *soc_hdl,
				    bool is_high_tput)
{ }
886 #endif
887 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
888 
889 #ifdef DP_TX_HW_DESC_HISTORY
/**
 * dp_tx_hw_desc_update_evt() - record a posted TCL descriptor in history
 * @hal_tx_desc_cached: CPU copy of the HW descriptor just programmed
 * @hal_ring_hdl: TCL ring the descriptor was posted to
 * @soc: DP soc handle
 *
 * Copies the descriptor contents, a log timestamp and the ring hp/tp
 * into the circular tx_hw_desc_history buffer for post-mortem debug.
 *
 * Return: None
 */
static inline void
dp_tx_hw_desc_update_evt(uint8_t *hal_tx_desc_cached,
			 hal_ring_handle_t hal_ring_hdl,
			 struct dp_soc *soc)
{
	struct dp_tx_hw_desc_evt *evt;
	uint64_t idx = 0;

	/* History buffer may not be allocated on this target */
	if (!soc->tx_hw_desc_history)
		return;

	/* Pre-increment, reset the shared index at the wrap point, then
	 * bound the local copy with a modulo so it is always in range.
	 * NOTE(review): the shared index update is not atomic — concurrent
	 * posters could collide on an entry; confirm callers serialize or
	 * that this history is best-effort debug data only.
	 */
	idx = ++soc->tx_hw_desc_history->index;
	if (idx == DP_TX_HW_DESC_HIST_MAX)
		soc->tx_hw_desc_history->index = 0;
	idx = qdf_do_div_rem(idx, DP_TX_HW_DESC_HIST_MAX);

	evt = &soc->tx_hw_desc_history->entry[idx];
	qdf_mem_copy(evt->tcl_desc, hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);
	evt->posted = qdf_get_log_timestamp();
	hal_get_sw_hptp(soc->hal_soc, hal_ring_hdl, &evt->tp, &evt->hp);
}
911 #else
static inline void
dp_tx_hw_desc_update_evt(uint8_t *hal_tx_desc_cached,
			 hal_ring_handle_t hal_ring_hdl,
			 struct dp_soc *soc)
{
	/* No-op when DP_TX_HW_DESC_HISTORY is not compiled in */
}
918 #endif
919 
920 #if defined(WLAN_FEATURE_TSF_UPLINK_DELAY) || defined(CONFIG_SAWF)
921 /**
922  * dp_tx_compute_hw_delay_us() - Compute hardware Tx completion delay
923  * @ts: Tx completion status
924  * @delta_tsf: Difference between TSF clock and qtimer
925  * @delay_us: Delay in microseconds
926  *
927  * Return: QDF_STATUS_SUCCESS   : Success
928  *         QDF_STATUS_E_INVAL   : Tx completion status is invalid or
929  *                                delay_us is NULL
930  *         QDF_STATUS_E_FAILURE : Error in delay calculation
931  */
932 QDF_STATUS
933 dp_tx_compute_hw_delay_us(struct hal_tx_completion_status *ts,
934 			  uint32_t delta_tsf,
935 			  uint32_t *delay_us);
936 
937 /**
938  * dp_set_delta_tsf() - Set delta_tsf to dp_soc structure
939  * @soc_hdl: cdp soc pointer
940  * @vdev_id: vdev id
941  * @delta_tsf: difference between TSF clock and qtimer
942  *
943  * Return: None
944  */
945 void dp_set_delta_tsf(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
946 		      uint32_t delta_tsf);
947 #endif
948 #ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
949 /**
 * dp_set_tsf_ul_delay_report() - Enable or disable reporting uplink delay
951  * @soc_hdl: cdp soc pointer
952  * @vdev_id: vdev id
953  * @enable: true to enable and false to disable
954  *
955  * Return: QDF_STATUS
956  */
957 QDF_STATUS dp_set_tsf_ul_delay_report(struct cdp_soc_t *soc_hdl,
958 				      uint8_t vdev_id, bool enable);
959 
960 /**
961  * dp_get_uplink_delay() - Get uplink delay value
962  * @soc_hdl: cdp soc pointer
963  * @vdev_id: vdev id
964  * @val: pointer to save uplink delay value
965  *
966  * Return: QDF_STATUS
967  */
968 QDF_STATUS dp_get_uplink_delay(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
969 			       uint32_t *val);
#endif /* WLAN_FEATURE_TSF_UPLINK_DELAY */
971 
972 /**
973  * dp_tx_pkt_tracepoints_enabled() - Get the state of tx pkt tracepoint
974  *
975  * Return: True if any tx pkt tracepoint is enabled else false
976  */
static inline
bool dp_tx_pkt_tracepoints_enabled(void)
{
	/* Checked in the same order as the original || chain so
	 * short-circuit behaviour is unchanged.
	 */
	if (qdf_trace_dp_tx_comp_tcp_pkt_enabled())
		return true;

	if (qdf_trace_dp_tx_comp_udp_pkt_enabled())
		return true;

	return qdf_trace_dp_tx_comp_pkt_enabled();
}
984 
985 #ifdef DP_TX_TRACKING
986 /**
987  * dp_tx_desc_set_timestamp() - set timestamp in tx descriptor
 * @tx_desc: tx descriptor
989  *
990  * Return: None
991  */
static inline
void dp_tx_desc_set_timestamp(struct dp_tx_desc_s *tx_desc)
{
	/* Record the enqueue time in system ticks; presumably consumed by
	 * the DP_TX_TRACKING stale-descriptor checks -- confirm against
	 * the readers of timestamp_tick.
	 */
	tx_desc->timestamp_tick = qdf_system_ticks();
}
997 
998 /**
999  * dp_tx_desc_check_corruption() - Verify magic pattern in tx descriptor
1000  * @tx_desc: tx descriptor
1001  *
1002  * Check for corruption in tx descriptor, if magic pattern is not matching
1003  * trigger self recovery
1004  *
1005  * Return: none
1006  */
1007 void dp_tx_desc_check_corruption(struct dp_tx_desc_s *tx_desc);
1008 #else
static inline
void dp_tx_desc_set_timestamp(struct dp_tx_desc_s *tx_desc)
{
	/* No-op when DP_TX_TRACKING is disabled */
}
1013 
static inline
void dp_tx_desc_check_corruption(struct dp_tx_desc_s *tx_desc)
{
	/* No-op when DP_TX_TRACKING is disabled */
}
1018 #endif
1019 
1020 #ifndef CONFIG_SAWF
static inline bool dp_sawf_tag_valid_get(qdf_nbuf_t nbuf)
{
	/* SAWF compiled out: no nbuf ever carries a valid SAWF tag */
	return false;
}
1025 #endif
1026 
1027 #ifdef HW_TX_DELAY_STATS_ENABLE
1028 /**
1029  * dp_tx_desc_set_ktimestamp() - set kernel timestamp in tx descriptor
1030  * @vdev: DP vdev handle
1031  * @tx_desc: tx descriptor
1032  *
1033  * Return: true when descriptor is timestamped, false otherwise
1034  */
1035 static inline
1036 bool dp_tx_desc_set_ktimestamp(struct dp_vdev *vdev,
1037 			       struct dp_tx_desc_s *tx_desc)
1038 {
1039 	if (qdf_unlikely(vdev->pdev->delay_stats_flag) ||
1040 	    qdf_unlikely(vdev->pdev->soc->wlan_cfg_ctx->pext_stats_enabled) ||
1041 	    qdf_unlikely(dp_tx_pkt_tracepoints_enabled()) ||
1042 	    qdf_unlikely(vdev->pdev->soc->peerstats_enabled) ||
1043 	    qdf_unlikely(dp_is_vdev_tx_delay_stats_enabled(vdev))) {
1044 		tx_desc->timestamp = qdf_ktime_real_get();
1045 		return true;
1046 	}
1047 	return false;
1048 }
1049 #else
1050 static inline
1051 bool dp_tx_desc_set_ktimestamp(struct dp_vdev *vdev,
1052 			       struct dp_tx_desc_s *tx_desc)
1053 {
1054 	if (qdf_unlikely(vdev->pdev->delay_stats_flag) ||
1055 	    qdf_unlikely(vdev->pdev->soc->wlan_cfg_ctx->pext_stats_enabled) ||
1056 	    qdf_unlikely(dp_tx_pkt_tracepoints_enabled()) ||
1057 	    qdf_unlikely(vdev->pdev->soc->peerstats_enabled)) {
1058 		tx_desc->timestamp = qdf_ktime_real_get();
1059 		return true;
1060 	}
1061 	return false;
1062 }
1063 #endif
1064 
1065 #ifdef CONFIG_DP_PKT_ADD_TIMESTAMP
1066 /**
1067  * dp_pkt_add_timestamp() - add timestamp in data payload
1068  *
1069  * @vdev: dp vdev
1070  * @index: index to decide offset in payload
1071  * @time: timestamp to add in data payload
1072  * @nbuf: network buffer
1073  *
1074  * Return: none
1075  */
1076 void dp_pkt_add_timestamp(struct dp_vdev *vdev,
1077 			  enum qdf_pkt_timestamp_index index, uint64_t time,
1078 			  qdf_nbuf_t nbuf);
1079 /**
1080  * dp_pkt_get_timestamp() - get current system time
1081  *
1082  * @time: return current system time
1083  *
1084  * Return: none
1085  */
1086 void dp_pkt_get_timestamp(uint64_t *time);
1087 #else
1088 #define dp_pkt_add_timestamp(vdev, index, time, nbuf)
1089 
static inline
void dp_pkt_get_timestamp(uint64_t *time)
{
	/*
	 * Stub for builds without CONFIG_DP_PKT_ADD_TIMESTAMP.  Zero the
	 * output rather than leaving it unwritten, so a caller that reads
	 * *time regardless of the config sees a deterministic "no
	 * timestamp" value instead of an indeterminate one.
	 */
	if (time)
		*time = 0;
}
1094 #endif
1095 #endif
1096