/*
 * Copyright (c) 2012-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <qdf_nbuf.h>         /* qdf_nbuf_t, etc. */
#include <htt.h>              /* HTT_TX_EXT_TID_MGMT */
#include <ol_htt_tx_api.h>    /* htt_tx_desc_tid */
#include <ol_txrx_api.h>      /* ol_txrx_vdev_handle */
#include <ol_txrx_ctrl_api.h> /* ol_txrx_sync */
#include <ol_txrx.h>
#include <ol_txrx_internal.h> /* TXRX_ASSERT1 */
#include <ol_txrx_types.h>    /* pdev stats */
#include <ol_tx_desc.h>       /* ol_tx_desc */
#include <ol_tx_send.h>       /* ol_tx_send */
#include <ol_txrx_peer_find.h>
#include <ol_tx_classify.h>
#include <ol_tx_queue.h>
#include <ipv4.h>
#include <ipv6_defs.h>
#include <ip_prot.h>
#include <enet.h>             /* ETHERTYPE_VLAN, etc. */
#include <cds_ieee80211_common.h>        /* ieee80211_frame */
#include <cdp_txrx_handle.h>
/*
 * In theory, this tx classify code could be used on the host or in the target.
 * Thus, this code uses generic OS primitives that can be aliased to either
 * the host's OS primitives or the target's OS primitives.
 * For now, the following #defines set up these host-specific or
 * target-specific aliases.
 */

#define OL_TX_CLASSIFY_EXTENSION(vdev, tx_desc, netbuf, msdu_info, txq)
#define OL_TX_CLASSIFY_MGMT_EXTENSION(vdev, tx_desc, netbuf, msdu_info, txq)

#ifdef QCA_TX_HTT2_SUPPORT
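/*
 * ol_tx_classify_htt2_frm() - mark whether a tx frame is a candidate for
 * download over the HTT2 (parallel) data pipe.
 * Only unicast, ethernet-encapsulated IPv4 TCP data frames are marked as
 * candidates, since frame re-ordering is left to TCP.
 */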
static void
ol_tx_classify_htt2_frm(
	struct ol_txrx_vdev_t *vdev,
	qdf_nbuf_t tx_nbuf,
	struct ol_txrx_msdu_info_t *tx_msdu_info)
{
	struct htt_msdu_info_t *htt = &tx_msdu_info->htt;
	A_UINT8 candi_frm = 0;

	/*
	 * Offload frame re-ordering to the L3 protocol; only TCP is
	 * supported for now.
	 */
	if ((htt->info.l2_hdr_type == htt_pkt_type_ethernet) &&
	    (htt->info.frame_type == htt_frm_type_data) &&
	    htt->info.is_unicast &&
	    (htt->info.ethertype == ETHERTYPE_IPV4)) {
		struct ipv4_hdr_t *ipHdr;

		ipHdr = (struct ipv4_hdr_t *)(qdf_nbuf_data(tx_nbuf) +
			htt->info.l3_hdr_offset);
		if (ipHdr->protocol == IP_PROTOCOL_TCP)
			candi_frm = 1;
	}

	qdf_nbuf_set_tx_parallel_dnload_frm(tx_nbuf, candi_frm);
}

#define OL_TX_CLASSIFY_HTT2_EXTENSION(vdev, netbuf, msdu_info)      \
	ol_tx_classify_htt2_frm(vdev, netbuf, msdu_info)
#else
#define OL_TX_CLASSIFY_HTT2_EXTENSION(vdev, netbuf, msdu_info)      /* no-op */
#endif /* QCA_TX_HTT2_SUPPORT */
/* DHCP goes with voice priority; WMM_AC_VO_TID1(); */
#define TX_DHCP_TID  6

#if defined(QCA_BAD_PEER_TX_FLOW_CL)
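/*
 * ol_if_tx_bad_peer_txq_overflow() - return true if a rate-limited ("bad")
 * peer's tx queue has already reached the per-peer queue depth limit
 * (peer_bal_txq_limit), false otherwise.
 */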
static inline A_BOOL
ol_if_tx_bad_peer_txq_overflow(
	struct ol_txrx_pdev_t *pdev,
	struct ol_txrx_peer_t *peer,
	struct ol_tx_frms_queue_t *txq)
{
	if (peer && pdev && txq && (peer->tx_limit_flag) &&
	    (txq->frms >= pdev->tx_peer_bal.peer_bal_txq_limit))
		return true;
	else
		return false;
}
#else
static inline A_BOOL ol_if_tx_bad_peer_txq_overflow(
	struct ol_txrx_pdev_t *pdev,
	struct ol_txrx_peer_t *peer,
	struct ol_tx_frms_queue_t *txq)
{
	return false;
}
#endif

/* EAPOL goes with voice priority: WMM_AC_TO_TID1(WMM_AC_VO); */
#define TX_EAPOL_TID  6

/* ARP goes with voice priority: WMM_AC_TO_TID1(pdev->arp_ac_override) */
#define TX_ARP_TID  6

/* For the non-IP case, use the default TID */
#define TX_DEFAULT_TID  0

/*
 * Determine the IP TOS priority.
 * IP TOS format:
 *        (Refer to Pg 57 of WMM-test-plan-v1.2)
 * IP-TOS - 8 bits
 *            : DSCP(6-bits) ECN(2-bits)
 *            : DSCP - P2 P1 P0 X X X
 *                where (P2 P1 P0) form the 802.1D priority
 */
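/* e.g. TOS 0xB8 (DSCP 46, EF): 0xB8 >> 5 = 5, so the frame maps to TID 5 */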
static inline A_UINT8
ol_tx_tid_by_ipv4(A_UINT8 *pkt)
{
	A_UINT8 ipPri, tid;
	struct ipv4_hdr_t *ipHdr = (struct ipv4_hdr_t *)pkt;

	ipPri = ipHdr->tos >> 5;
	tid = ipPri & 0x7;

	return tid;
}

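/*
 * ol_tx_tid_by_ipv6() - derive the TID from the IPv6 traffic class field,
 * using the top three (802.1D) bits, as in the IPv4 case above.
 */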
static inline A_UINT8
ol_tx_tid_by_ipv6(A_UINT8 *pkt)
{
	return (ipv6_traffic_class((struct ipv6_hdr_t *)pkt) >> 5) & 0x7;
}

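/*
 * ol_tx_set_ether_type() - parse the frame's L2 header (raw 802.11 or
 * 802.3/ethernet, including any VLAN tag or LLC/SNAP header) and record
 * the ethertype and L3 header offset in tx_msdu_info->htt.info for use by
 * the TID classification below.
 */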
static inline void
ol_tx_set_ether_type(
	A_UINT8 *datap,
	struct ol_txrx_msdu_info_t *tx_msdu_info)
{
	A_UINT16 typeorlength;
	A_UINT8 *ptr;
	A_UINT8 *l3_data_ptr;

	if (tx_msdu_info->htt.info.l2_hdr_type == htt_pkt_type_raw) {
		/* adjust hdr_ptr to RA */
		struct ieee80211_frame *wh = (struct ieee80211_frame *)datap;

		if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
					IEEE80211_FC0_TYPE_DATA) {
			struct llc_snap_hdr_t *llc;
			/* dot11 encapsulated frame */
			struct ieee80211_qosframe *whqos =
					(struct ieee80211_qosframe *)datap;
			if (whqos->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) {
				tx_msdu_info->htt.info.l3_hdr_offset =
					sizeof(struct ieee80211_qosframe);
			} else {
				tx_msdu_info->htt.info.l3_hdr_offset =
					sizeof(struct ieee80211_frame);
			}
			llc = (struct llc_snap_hdr_t *)
				(datap + tx_msdu_info->htt.info.l3_hdr_offset);
			tx_msdu_info->htt.info.ethertype =
				(llc->ethertype[0] << 8) | llc->ethertype[1];
			/*
			 * l3_hdr_offset refers to the end of the 802.3 or
			 * 802.11 header, which may be a LLC/SNAP header rather
			 * than the IP header.
			 * Thus, don't increment l3_hdr_offset += sizeof(*llc);
			 * rather, leave it as is.
			 */
		} else {
			/*
			 * This function should only be applied to data frames.
			 * For management frames, we already know to use
			 * HTT_TX_EXT_TID_MGMT.
			 */
			TXRX_ASSERT2(0);
		}
	} else if (tx_msdu_info->htt.info.l2_hdr_type ==
					htt_pkt_type_ethernet) {
		ptr = (datap + QDF_MAC_ADDR_SIZE * 2);
		typeorlength = (ptr[0] << 8) | ptr[1];
		/* ETHERNET_HDR_LEN */
		l3_data_ptr = datap + sizeof(struct ethernet_hdr_t);

		if (typeorlength == ETHERTYPE_VLAN) {
			ptr = (datap + QDF_MAC_ADDR_SIZE * 2
					+ ETHERTYPE_VLAN_LEN);
			typeorlength = (ptr[0] << 8) | ptr[1];
			l3_data_ptr += ETHERTYPE_VLAN_LEN;
		}

		if (!IS_ETHERTYPE(typeorlength)) {
			/* 802.3 header */
			struct llc_snap_hdr_t *llc_hdr =
				(struct llc_snap_hdr_t *)l3_data_ptr;
			typeorlength = (llc_hdr->ethertype[0] << 8) |
							llc_hdr->ethertype[1];
			l3_data_ptr += sizeof(struct llc_snap_hdr_t);
		}

		tx_msdu_info->htt.info.l3_hdr_offset = (A_UINT8)(l3_data_ptr -
									datap);
		tx_msdu_info->htt.info.ethertype = typeorlength;
	}
}

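/*
 * ol_tx_tid_by_ether_type() - choose a TID from the ethertype recorded by
 * ol_tx_set_ether_type(): IPv4/IPv6 frames are classified by DSCP,
 * EAPOL/WAPI and ARP frames go with voice priority, and everything else
 * uses the default TID.
 */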
static inline A_UINT8
ol_tx_tid_by_ether_type(
	A_UINT8 *datap,
	struct ol_txrx_msdu_info_t *tx_msdu_info)
{
	A_UINT8 tid;
	A_UINT8 *l3_data_ptr;
	A_UINT16 typeorlength;

	l3_data_ptr = datap + tx_msdu_info->htt.info.l3_hdr_offset;
	typeorlength = tx_msdu_info->htt.info.ethertype;

	/* IP packet, do packet inspection for TID */
	if (typeorlength == ETHERTYPE_IPV4) {
		tid = ol_tx_tid_by_ipv4(l3_data_ptr);
	} else if (typeorlength == ETHERTYPE_IPV6) {
		tid = ol_tx_tid_by_ipv6(l3_data_ptr);
	} else if (ETHERTYPE_IS_EAPOL_WAPI(typeorlength)) {
		/* EAPOL goes with voice priority */
		tid = TX_EAPOL_TID;
	} else if (typeorlength == ETHERTYPE_ARP) {
		tid = TX_ARP_TID;
	} else {
		/* For the non-IP case, use the default TID */
		tid = TX_DEFAULT_TID;
	}
	return tid;
}

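/*
 * ol_tx_tid_by_raw_type() - for raw 802.11 data frames, take the TID
 * directly from the QoS control field, or use the non-QoS TID if the
 * frame carries no QoS header.
 */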
static inline A_UINT8
ol_tx_tid_by_raw_type(
	A_UINT8 *datap,
	struct ol_txrx_msdu_info_t *tx_msdu_info)
{
	A_UINT8 tid = HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST;

	/* adjust hdr_ptr to RA */
	struct ieee80211_frame *wh = (struct ieee80211_frame *)datap;

	/* FIXME: This code does not handle the 4-address format, where the
	 * QoS field is not at its usual location.
	 */
	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
					IEEE80211_FC0_TYPE_DATA) {
		/* dot11 encapsulated frame */
		struct ieee80211_qosframe *whqos =
					(struct ieee80211_qosframe *)datap;
		if (whqos->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS)
			tid = whqos->i_qos[0] & IEEE80211_QOS_TID;
		else
			tid = HTT_NON_QOS_TID;
	} else {
		/*
		 * This function should only be applied to data frames.
		 * For management frames, we already know to use
		 * HTT_TX_EXT_TID_MGMT.
		 */
		qdf_assert(0);
	}
	return tid;
}

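/*
 * ol_tx_tid() - determine the extended TID for a data frame based on the
 * pdev frame format (raw, 802.3, or native WiFi), using the out-of-band
 * ext_tid if one was already specified and packet inspection otherwise.
 */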
static A_UINT8
ol_tx_tid(
	struct ol_txrx_pdev_t *pdev,
	qdf_nbuf_t tx_nbuf,
	struct ol_txrx_msdu_info_t *tx_msdu_info)
{
	A_UINT8 *datap = qdf_nbuf_data(tx_nbuf);
	A_UINT8 tid;

	if (pdev->frame_format == wlan_frm_fmt_raw) {
		tx_msdu_info->htt.info.l2_hdr_type = htt_pkt_type_raw;

		ol_tx_set_ether_type(datap, tx_msdu_info);
		tid = tx_msdu_info->htt.info.ext_tid ==
					QDF_NBUF_TX_EXT_TID_INVALID ?
			ol_tx_tid_by_raw_type(datap, tx_msdu_info) :
			tx_msdu_info->htt.info.ext_tid;
	} else if (pdev->frame_format == wlan_frm_fmt_802_3) {
		tx_msdu_info->htt.info.l2_hdr_type = htt_pkt_type_ethernet;

		ol_tx_set_ether_type(datap, tx_msdu_info);
		tid =
			tx_msdu_info->htt.info.ext_tid ==
					QDF_NBUF_TX_EXT_TID_INVALID ?
				ol_tx_tid_by_ether_type(datap, tx_msdu_info) :
				tx_msdu_info->htt.info.ext_tid;
	} else if (pdev->frame_format == wlan_frm_fmt_native_wifi) {
		struct llc_snap_hdr_t *llc;

		tx_msdu_info->htt.info.l2_hdr_type = htt_pkt_type_native_wifi;
		tx_msdu_info->htt.info.l3_hdr_offset =
						sizeof(struct ieee80211_frame);
		llc = (struct llc_snap_hdr_t *)
			(datap + tx_msdu_info->htt.info.l3_hdr_offset);
		tx_msdu_info->htt.info.ethertype =
			(llc->ethertype[0] << 8) | llc->ethertype[1];
		/*
		 * Native WiFi is a special case of "raw" 802.11 header format.
		 * However, we expect that for all cases that use native WiFi,
		 * the TID will be directly specified out of band.
		 */
		tid = tx_msdu_info->htt.info.ext_tid;
	} else {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
			  "Invalid standard frame type: %d\n",
			  pdev->frame_format);
		qdf_assert(0);
		tid = HTT_TX_EXT_TID_INVALID;
	}
	return tid;
}

#if defined(FEATURE_WLAN_TDLS)
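/*
 * ol_tx_tdls_peer_find() - select the destination peer for a unicast data
 * frame on a (possibly TDLS-enabled) STA vdev and take a reference on it,
 * returning its local ID through peer_id.  Depending on whether a TDLS
 * link exists for the destination, this is either the TDLS peer or the AP
 * (last_real_peer); NULL is returned if no suitable peer is found.
 */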
static inline
struct ol_txrx_peer_t *ol_tx_tdls_peer_find(struct ol_txrx_pdev_t *pdev,
						struct ol_txrx_vdev_t *vdev,
						uint8_t *dest_addr,
						uint8_t *peer_id)
{
	struct ol_txrx_peer_t *peer = NULL;
	uint8_t zero_mac_addr[QDF_MAC_ADDR_SIZE] = { 0, 0, 0, 0, 0, 0 };
	enum peer_debug_id_type id_type = PEER_DEBUG_ID_OL_INTERNAL;

	struct ol_txrx_peer_t *(*find_peer)(struct ol_txrx_pdev_t *pdev,
					    uint8_t *peer_mac_addr,
					    int mac_addr_is_aligned,
					    u8 check_valid,
					    enum peer_debug_id_type dbg_id)
		= ol_txrx_peer_find_hash_find_get_ref;

	if (vdev->hlTdlsFlag) {
		peer = find_peer(pdev, vdev->hl_tdls_ap_mac_addr.raw,
				 0, 1, id_type);

		if (peer && (peer->peer_ids[0] == HTT_INVALID_PEER_ID)) {
			ol_txrx_peer_release_ref(peer, id_type);
			peer = NULL;
		} else {
			if (peer) {
				*peer_id = peer->local_id;
				return peer;
			}
		}
	}

	/* Packets destined to the TDLS peer, or to the AP with no TDLS link.
	 * Optimized to directly get the peer based on 'dest_addr'.
	 */
	if (vdev->last_real_peer &&
	    !qdf_mem_cmp(vdev->last_real_peer->mac_addr.raw,
			 dest_addr, QDF_MAC_ADDR_SIZE)) {
		ol_txrx_peer_get_ref(vdev->last_real_peer, id_type);
		*peer_id = vdev->last_real_peer->local_id;
		peer = vdev->last_real_peer;
	} else {
		/* packets destined for other peers or the AP with a TDLS link */
		if (vdev->last_real_peer &&
		    !qdf_mem_cmp(vdev->hl_tdls_ap_mac_addr.raw,
				 zero_mac_addr,
				 QDF_MAC_ADDR_SIZE)) {
		/* With no TDLS link, return last_real_peer for both the AP
		 * and other bss peers
		 */
			ol_txrx_peer_get_ref(vdev->last_real_peer, id_type);
			*peer_id = vdev->last_real_peer->local_id;
			peer = vdev->last_real_peer;
		} else { /* packet destined for other peers and the AP when
			  * the STA has a TDLS link
			  */
			peer = find_peer(pdev, vdev->hl_tdls_ap_mac_addr.raw,
					 0, 1, id_type);

			if (peer &&
			    (peer->peer_ids[0] == HTT_INVALID_PEER_ID)) {
				ol_txrx_peer_release_ref(peer, id_type);
				peer = NULL;
			} else {
				if (peer)
					*peer_id = peer->local_id;
			}
		}
	}
	return peer;
}

#else
static struct ol_txrx_peer_t *ol_tx_tdls_peer_find(struct ol_txrx_pdev_t *pdev,
						struct ol_txrx_vdev_t *vdev,
						uint8_t *dest_addr,
						uint8_t *peer_id)
{
	struct ol_txrx_peer_t *peer = NULL;

	peer = ol_txrx_assoc_peer_find(vdev);

	return peer;
}
#endif

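/*
 * ol_tx_classify() - classify a data frame: find the destination peer
 * (taking a reference on it), determine the extended TID, and select the
 * tx frames queue the frame belongs to.
 * Returns the selected tx queue, or NULL on error (e.g. unknown peer,
 * invalid TID, or bad-peer queue overflow).  On success, any peer
 * reference taken is returned through tx_msdu_info->peer.
 */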
struct ol_tx_frms_queue_t *
ol_tx_classify(
	struct ol_txrx_vdev_t *vdev,
	struct ol_tx_desc_t *tx_desc,
	qdf_nbuf_t tx_nbuf,
	struct ol_txrx_msdu_info_t *tx_msdu_info)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	struct ol_txrx_peer_t *peer = NULL;
	struct ol_tx_frms_queue_t *txq = NULL;
	A_UINT8 *dest_addr;
	A_UINT8 tid;
	u_int8_t peer_id;

	TX_SCHED_DEBUG_PRINT("Enter");
	dest_addr = ol_tx_dest_addr_find(pdev, tx_nbuf);
	if (unlikely(!dest_addr)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX,
				QDF_TRACE_LEVEL_ERROR,
				"Error: dest_addr is NULL.\n");
		return NULL; /* error */
	}
	if ((IEEE80211_IS_MULTICAST(dest_addr)) ||
	    (vdev->opmode == wlan_op_mode_ocb)) {
		txq = &vdev->txqs[OL_TX_VDEV_MCAST_BCAST];
		tx_msdu_info->htt.info.ext_tid =
					HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST;
		if (vdev->opmode == wlan_op_mode_sta) {
			/*
			 * The STA sends a frame with a broadcast
			 * dest addr (DA) as a
			 * unicast frame to the AP's receive addr (RA).
			 * Find the peer object that represents the AP
			 * that the STA is associated with.
			 */
			peer = ol_txrx_assoc_peer_find(vdev);
			if (!peer) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "Error: STA %pK ("QDF_MAC_ADDR_FMT") trying to send bcast DA tx data frame w/o association\n",
					  vdev,
					  QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
				return NULL; /* error */
			} else if ((peer->security[
				OL_TXRX_PEER_SECURITY_MULTICAST].sec_type
						!= htt_sec_type_wapi) &&
				   (qdf_nbuf_is_ipv4_pkt(tx_nbuf) == true)) {
				if (QDF_NBUF_CB_PACKET_TYPE_DHCP ==
						QDF_NBUF_CB_GET_PACKET_TYPE(
								tx_nbuf)) {
					/* DHCP frame to go with
					 * voice priority
					 */
					txq = &peer->txqs[TX_DHCP_TID];
					tx_msdu_info->htt.info.ext_tid =
								TX_DHCP_TID;
				}
			}
			/*
			 * The following line assumes each peer object has a
			 * single ID. This is currently true, and is expected
			 * to remain true.
			 */
			tx_msdu_info->htt.info.peer_id = peer->peer_ids[0];
		} else if (vdev->opmode == wlan_op_mode_ocb) {
			tx_msdu_info->htt.info.peer_id = HTT_INVALID_PEER_ID;
			/*
			 * In OCB mode, don't worry about the peer.
			 * We don't need it.
			 */
			peer = NULL;
		} else {
			tx_msdu_info->htt.info.peer_id = HTT_INVALID_PEER_ID;
			/*
			 * Look up the vdev's BSS peer, so that the
			 * classify_extension function can check whether to
			 * encrypt multicast / broadcast frames.
			 */
			peer = ol_txrx_peer_find_hash_find_get_ref
						(pdev,
						 vdev->mac_addr.raw,
						 0, 1,
						 PEER_DEBUG_ID_OL_INTERNAL);
			if (!peer) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "Error: vdev %pK ("QDF_MAC_ADDR_FMT") trying to send bcast/mcast, but no self-peer found\n",
					  vdev,
					  QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
				return NULL; /* error */
			}
		}
		tx_msdu_info->htt.info.is_unicast = false;
	} else {
		/* tid would be overwritten for the non-QoS case */
		tid = ol_tx_tid(pdev, tx_nbuf, tx_msdu_info);
		if ((HTT_TX_EXT_TID_INVALID == tid) ||
		    (tid >= OL_TX_NUM_TIDS)) {
			QDF_TRACE(QDF_MODULE_ID_TXRX,
				  QDF_TRACE_LEVEL_ERROR,
				  "%s Error: could not classify packet into valid TID(%d).\n",
				  __func__, tid);
			return NULL;
		}
#ifdef ATH_SUPPORT_WAPI
		/* Check to see if a frame is a WAI frame */
		if (tx_msdu_info->htt.info.ethertype == ETHERTYPE_WAI) {
			/* WAI frames should not be encrypted */
			tx_msdu_info->htt.action.do_encrypt = 0;
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
				  "Tx Frame is a WAI frame\n");
		}
#endif /* ATH_SUPPORT_WAPI */

		/*
		 * Find the peer and increment its reference count.
		 * If this vdev is an AP, use the dest addr (DA) to determine
		 * which peer STA this unicast data frame is for.
		 * If this vdev is a STA, the unicast data frame is for the
		 * AP the STA is associated with.
		 */
		if (vdev->opmode == wlan_op_mode_sta) {
			/*
			 * TO DO:
			 * To support TDLS, first check if there is a TDLS
			 * peer STA,
			 * and if so, check if the DA matches the TDLS peer
			 * STA's MAC address. If there is no peer TDLS STA,
			 * or if the DA is not the TDLS STA's address,
			 * then the frame is either for the AP itself, or is
			 * supposed to be sent to the AP for forwarding.
			 */
			peer = ol_tx_tdls_peer_find(pdev, vdev,
						    dest_addr,
						    &peer_id);
		} else {
			peer = ol_txrx_peer_find_hash_find_get_ref(pdev,
								   dest_addr,
								   0, 1,
						PEER_DEBUG_ID_OL_INTERNAL);
		}
		tx_msdu_info->htt.info.is_unicast = true;
		if (!peer) {
			/*
			 * Unicast data xfer can only happen to an
			 * associated peer. It is illegitimate to send unicast
			 * data if there is no peer to send it to.
			 */
			ol_txrx_err_rl("Error: vdev %pK (" QDF_MAC_ADDR_FMT ") trying to send unicast tx data frame to an unknown peer",
				       vdev,
				       QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
			return NULL; /* error */
		}
		TX_SCHED_DEBUG_PRINT("Peer found");
		if (!peer->qos_capable) {
			tid = OL_TX_NON_QOS_TID;
		} else if ((peer->security[
				OL_TXRX_PEER_SECURITY_UNICAST].sec_type
					!= htt_sec_type_wapi) &&
			   (qdf_nbuf_is_ipv4_pkt(tx_nbuf) == true)) {
			if (QDF_NBUF_CB_PACKET_TYPE_DHCP ==
					QDF_NBUF_CB_GET_PACKET_TYPE(tx_nbuf))
				/* DHCP frame to go with voice priority */
				tid = TX_DHCP_TID;
		}

		/* Only allow encryption when in authenticated state */
		if (OL_TXRX_PEER_STATE_AUTH != peer->state)
			tx_msdu_info->htt.action.do_encrypt = 0;

		txq = &peer->txqs[tid];
		tx_msdu_info->htt.info.ext_tid = tid;
		/*
		 * The following line assumes each peer object has a single ID.
		 * This is currently true, and is expected to remain true.
		 */
		tx_msdu_info->htt.info.peer_id = peer->peer_ids[0];
		/*
		 * WORKAROUND - check that the peer ID is valid.
		 * If tx data is provided before ol_rx_peer_map_handler is
		 * called to record the peer ID specified by the target,
		 * then we could end up here with an invalid peer ID.
		 * TO DO: rather than dropping the tx frame, pause the txq it
		 * goes into, then fill in the peer ID for the entries in the
		 * txq when the peer_map event provides the peer ID, and then
		 * unpause the txq.
		 */
		if (tx_msdu_info->htt.info.peer_id == HTT_INVALID_PEER_ID) {
			if (peer) {
				ol_txrx_info("remove the peer for invalid peer_id %pK",
					     peer);
				/* remove the peer reference added above */
				ol_txrx_peer_release_ref
						(peer,
						 PEER_DEBUG_ID_OL_INTERNAL);
				tx_msdu_info->peer = NULL;
			}
			return NULL;
		}
	}
	tx_msdu_info->peer = peer;
	if (ol_if_tx_bad_peer_txq_overflow(pdev, peer, txq))
		return NULL;
	/*
	 * If relevant, do a deeper inspection to determine additional
	 * characteristics of the tx frame.
	 * If the frame is invalid, then the txq will be set to NULL to
	 * indicate an error.
	 */
	OL_TX_CLASSIFY_EXTENSION(vdev, tx_desc, tx_nbuf, tx_msdu_info, txq);
	if (IEEE80211_IS_MULTICAST(dest_addr) && vdev->opmode !=
				wlan_op_mode_sta && tx_msdu_info->peer !=
								NULL) {
		ol_txrx_dbg("remove the peer reference %pK", peer);
		/* remove the peer reference added above */
		ol_txrx_peer_release_ref(tx_msdu_info->peer,
					 PEER_DEBUG_ID_OL_INTERNAL);
		/* Set peer to NULL for the multicast, non-STA case */
		tx_msdu_info->peer = NULL;
	}

	/* Whether this frame can be downloaded through the HTT2 data pipe. */
	OL_TX_CLASSIFY_HTT2_EXTENSION(vdev, tx_nbuf, tx_msdu_info);

	/* Update Tx Queue info */
	tx_desc->txq = txq;

	TX_SCHED_DEBUG_PRINT("Leave");
	return txq;
}

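/*
 * ol_tx_classify_mgmt() - classify a management frame: multicast mgmt
 * frames, and unicast mgmt frames sent before the destination peer exists,
 * go to the vdev's default mgmt tx queue; other unicast mgmt frames go to
 * the peer's management-TID queue.  Returns the selected tx queue.
 */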
struct ol_tx_frms_queue_t *
ol_tx_classify_mgmt(
	struct ol_txrx_vdev_t *vdev,
	struct ol_tx_desc_t *tx_desc,
	qdf_nbuf_t tx_nbuf,
	struct ol_txrx_msdu_info_t *tx_msdu_info)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	struct ol_txrx_peer_t *peer = NULL;
	struct ol_tx_frms_queue_t *txq = NULL;
	A_UINT8 *dest_addr;
	union ol_txrx_align_mac_addr_t local_mac_addr_aligned, *mac_addr;

	TX_SCHED_DEBUG_PRINT("Enter");
	dest_addr = ol_tx_dest_addr_find(pdev, tx_nbuf);
	if (unlikely(!dest_addr)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX,
				QDF_TRACE_LEVEL_ERROR,
				"Error: dest_addr is NULL.\n");
		return NULL; /* error */
	}
	if (IEEE80211_IS_MULTICAST(dest_addr)) {
		/*
		 * AP:  beacons are broadcast,
		 *      public action frames (e.g. extended channel
		 *      switch announce) may be broadcast
		 * STA: probe requests can be either broadcast or unicast
		 */
		txq = &vdev->txqs[OL_TX_VDEV_DEFAULT_MGMT];
		tx_msdu_info->htt.info.peer_id = HTT_INVALID_PEER_ID;
		tx_msdu_info->peer = NULL;
		tx_msdu_info->htt.info.is_unicast = 0;
	} else {
		/*
		 * Find the peer and increment its reference count.
		 * If this vdev is an AP, use the receiver addr (RA) to
		 * determine which peer STA this unicast mgmt frame is for.
		 * If this vdev is a STA, the unicast mgmt frame is for the
		 * AP the STA is associated with.
		 * Probe request / response and Assoc request / response are
		 * sent before the peer exists - in this case, use the
		 * vdev's default tx queue.
		 */
		if (vdev->opmode == wlan_op_mode_sta) {
			/*
			 * TO DO:
			 * To support TDLS, first check if there is a TDLS
			 * peer STA, and if so, check if the DA matches
			 * the TDLS peer STA's MAC address.
			 */
			peer = ol_txrx_assoc_peer_find(vdev);
			/*
			 * Some special cases (preauth, for example) need to
			 * send a unicast mgmt frame to an unassociated AP.
			 * In such cases, check whether the dest addr matches
			 * the associated peer's addr.  If not, set peer to
			 * NULL so this frame is queued to the vdev queue.
			 */
			if (peer) {

				qdf_mem_copy(
					&local_mac_addr_aligned.raw[0],
					dest_addr, QDF_MAC_ADDR_SIZE);
				mac_addr = &local_mac_addr_aligned;
				if (ol_txrx_peer_find_mac_addr_cmp
						(mac_addr,
						 &peer->mac_addr) != 0) {
					ol_txrx_peer_release_ref
						(peer,
						 PEER_DEBUG_ID_OL_INTERNAL);
					peer = NULL;
				}
			}
		} else {
			/* find the peer and increment its reference count */
			peer = ol_txrx_peer_find_hash_find_get_ref(pdev,
								   dest_addr,
								   0, 1,
						PEER_DEBUG_ID_OL_INTERNAL);
		}
		tx_msdu_info->peer = peer;
		if (!peer) {
			txq = &vdev->txqs[OL_TX_VDEV_DEFAULT_MGMT];
			tx_msdu_info->htt.info.peer_id = HTT_INVALID_PEER_ID;
		} else {
			txq = &peer->txqs[HTT_TX_EXT_TID_MGMT];
			tx_msdu_info->htt.info.ext_tid = HTT_TX_EXT_TID_MGMT;
			/*
			 * The following line assumes each peer object has a
			 * single ID. This is currently true, and is expected
			 * to remain true.
			 */
			tx_msdu_info->htt.info.peer_id = peer->peer_ids[0];
		}
		tx_msdu_info->htt.info.is_unicast = 1;
	}
	/*
	 * If relevant, do a deeper inspection to determine additional
	 * characteristics of the tx frame.
	 * If the frame is invalid, then the txq will be set to NULL to
	 * indicate an error.
	 */
	OL_TX_CLASSIFY_MGMT_EXTENSION(vdev, tx_desc, tx_nbuf,
				      tx_msdu_info, txq);

	/* Whether this frame can be downloaded through the HTT2 data pipe. */
	OL_TX_CLASSIFY_HTT2_EXTENSION(vdev, tx_nbuf, tx_msdu_info);

	/* Update Tx Queue info */
	tx_desc->txq = txq;

	TX_SCHED_DEBUG_PRINT("Leave");
	return txq;
}

#ifdef currently_unused
QDF_STATUS
ol_tx_classify_extension(
	struct ol_txrx_vdev_t *vdev,
	struct ol_tx_desc_t *tx_desc,
	qdf_nbuf_t tx_msdu,
	struct ol_txrx_msdu_info_t *msdu_info)
{
	u8 *datap = qdf_nbuf_data(tx_msdu);
	struct ol_txrx_peer_t *peer;
	int which_key;

	/*
	 * The following msdu_info fields were already filled in by the
	 * ol_tx entry function or the regular ol_tx_classify function:
	 *     htt.info.vdev_id            (ol_tx_hl or ol_tx_non_std_hl)
	 *     htt.info.ext_tid            (ol_tx_non_std_hl or ol_tx_classify)
	 *     htt.info.frame_type         (ol_tx_hl or ol_tx_non_std_hl)
	 *     htt.info.l2_hdr_type        (ol_tx_hl or ol_tx_non_std_hl)
	 *     htt.info.is_unicast         (ol_tx_classify)
	 *     htt.info.peer_id            (ol_tx_classify)
	 *     peer                        (ol_tx_classify)
	 *     if (is_unicast) {
	 *         htt.info.ethertype      (ol_tx_classify)
	 *         htt.info.l3_hdr_offset  (ol_tx_classify)
	 *     }
	 * The following fields need to be filled in by this function:
	 *     if (!is_unicast) {
	 *         htt.info.ethertype
	 *         htt.info.l3_hdr_offset
	 *     }
	 *     htt.action.band (NOT CURRENTLY USED)
	 *     htt.action.do_encrypt
	 *     htt.action.do_tx_complete
	 * The following fields are not needed for data frames, and can
	 * be left uninitialized:
	 *     htt.info.frame_subtype
	 */

	if (!msdu_info->htt.info.is_unicast) {
		int l2_hdr_size;
		u16 ethertype;

		if (msdu_info->htt.info.l2_hdr_type == htt_pkt_type_ethernet) {
			struct ethernet_hdr_t *eh;

			eh = (struct ethernet_hdr_t *)datap;
			l2_hdr_size = sizeof(*eh);
			ethertype = (eh->ethertype[0] << 8) | eh->ethertype[1];

			if (ethertype == ETHERTYPE_VLAN) {
				struct ethernet_vlan_hdr_t *evh;

				evh = (struct ethernet_vlan_hdr_t *)datap;
				l2_hdr_size = sizeof(*evh);
				ethertype = (evh->ethertype[0] << 8) |
							evh->ethertype[1];
			}

			if (!IS_ETHERTYPE(ethertype)) {
				/* 802.3 header */
				struct llc_snap_hdr_t *llc =
					(struct llc_snap_hdr_t *)(datap +
							l2_hdr_size);
				ethertype = (llc->ethertype[0] << 8) |
							llc->ethertype[1];
				l2_hdr_size += sizeof(*llc);
			}
			msdu_info->htt.info.l3_hdr_offset = l2_hdr_size;
			msdu_info->htt.info.ethertype = ethertype;
		} else { /* 802.11 */
			struct llc_snap_hdr_t *llc;

			l2_hdr_size = ol_txrx_ieee80211_hdrsize(datap);
			llc = (struct llc_snap_hdr_t *)(datap + l2_hdr_size);
			ethertype = (llc->ethertype[0] << 8) |
							llc->ethertype[1];
			/*
			 * Don't include the LLC/SNAP header in l2_hdr_size,
			 * because l3_hdr_offset is actually supposed to refer
			 * to the header after the 802.3 or 802.11 header,
			 * which could be a LLC/SNAP header rather
			 * than the L3 header.
			 */
		}
		msdu_info->htt.info.l3_hdr_offset = l2_hdr_size;
		msdu_info->htt.info.ethertype = ethertype;
		which_key = txrx_sec_mcast;
	} else {
		which_key = txrx_sec_ucast;
	}
	peer = msdu_info->peer;
	/*
	 * msdu_info->htt.action.do_encrypt is initially set in
	 * ol_tx_desc_hl.  Additional checks are applied here.
	 */
	msdu_info->htt.action.do_encrypt = (!peer) ? 0 :
		(peer->security[which_key].sec_type == htt_sec_type_none) ? 0 :
		msdu_info->htt.action.do_encrypt;
	/*
	 * For systems that have a frame by frame spec for whether to receive
	 * a tx completion notification, use the tx completion notification
	 * only for certain management frames, not for data frames.
	 * (In the future, this may be changed slightly, e.g. to request a
	 * tx completion notification for the final EAPOL message sent by a
	 * STA during the key delivery handshake.)
	 */
	msdu_info->htt.action.do_tx_complete = 0;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS
ol_tx_classify_mgmt_extension(
		struct ol_txrx_vdev_t *vdev,
		struct ol_tx_desc_t *tx_desc,
		qdf_nbuf_t tx_msdu,
		struct ol_txrx_msdu_info_t *msdu_info)
{
	struct ieee80211_frame *wh;

	/*
	 * The following msdu_info fields were already filled in by the
	 * ol_tx entry function or the regular ol_tx_classify_mgmt function:
	 *     htt.info.vdev_id          (ol_txrx_mgmt_send)
	 *     htt.info.frame_type       (ol_txrx_mgmt_send)
	 *     htt.info.l2_hdr_type      (ol_txrx_mgmt_send)
	 *     htt.action.do_tx_complete (ol_txrx_mgmt_send)
	 *     htt.info.peer_id          (ol_tx_classify_mgmt)
	 *     htt.info.ext_tid          (ol_tx_classify_mgmt)
	 *     htt.info.is_unicast       (ol_tx_classify_mgmt)
	 *     peer                      (ol_tx_classify_mgmt)
	 * The following fields need to be filled in by this function:
	 *     htt.info.frame_subtype
	 *     htt.info.l3_hdr_offset
	 *     htt.action.band (NOT CURRENTLY USED)
	 * The following fields are not needed for mgmt frames, and can
	 * be left uninitialized:
	 *     htt.info.ethertype
	 *     htt.action.do_encrypt
	 *         (This will be filled in by other SW, which knows whether
	 *         the peer has robust-management-frames enabled.)
	 */
	wh = (struct ieee80211_frame *)qdf_nbuf_data(tx_msdu);
	msdu_info->htt.info.frame_subtype =
		(wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) >>
		IEEE80211_FC0_SUBTYPE_SHIFT;
	msdu_info->htt.info.l3_hdr_offset = sizeof(struct ieee80211_frame);

	return QDF_STATUS_SUCCESS;
}
#endif