// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved.
 *
 * Contact Information:
 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * Few modifications for Realtek's Wi-Fi drivers by
 * Andrea Merello <andrea.merello@gmail.com>
 *
 * A special thanks goes to Realtek for their support !
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <linux/uaccess.h>
#include <linux/if_vlan.h>

#include "rtllib.h"

/* 802.11 Data Frame
 *
 *
 * 802.11 frame_control for data frames - 2 bytes
 *      ,--------------------------------------------------------------------.
 * bits | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |  9 |  a |  b  |  c  |  d  | e  |
 *      |---|---|---|---|---|---|---|---|---|----|----|-----|-----|-----|----|
 * val  | 0 | 0 | 0 | 1 | x | 0 | 0 | 0 | 1 |  0 |  x |  x  |  x  |  x  | x  |
 *      |---|---|---|---|---|---|---|---|---|----|----|-----|-----|-----|----|
 * desc |  ver  | type  |  ^-subtype-^  |to |from|more|retry| pwr |more |wep |
 *      |       |       | x=0 data      |DS | DS |frag|     | mgm |data |    |
 *      |       |       | x=1 data+ack  |   |    |    |     |     |     |    |
 *      '--------------------------------------------------------------------'
 *                                           /\
 *                                           |
 * 802.11 Data Frame                         |
 *          ,--------- 'ctrl' expands to >---'
 *          |
 *       ,--'---,-------------------------------------------------------------.
 * Bytes |  2   |  2   |    6    |    6    |    6    |  2   | 0..2312 |   4  |
 *       |------|------|---------|---------|---------|------|---------|------|
 * Desc. | ctrl | dura |  DA/RA  |   TA    |    SA   | Sequ |  Frame  |  fcs |
 *       |      | tion | (BSSID) |         |         | ence |  data   |      |
 *       `--------------------------------------------------|         |------'
 * Total: 28 non-data bytes                                 `----.----'
 *                                                               |
 *        .- 'Frame data' expands to <---------------------------'
 *        |
 *        V
 *       ,---------------------------------------------------.
 * Bytes |  1   |  1   |    1    |    3     |  2   |  0-2304 |
 *       |------|------|---------|----------|------|---------|
 * Desc. | SNAP | SNAP | Control |Eth Tunnel| Type | IP      |
 *       | DSAP | SSAP |         |          |      | Packet  |
 *       | 0xAA | 0xAA |0x03 (UI)|0x00-00-F8|      |         |
 *       `-----------------------------------------|         |
 * Total: 8 non-data bytes                         `----.----'
 *                                                      |
 *        .- 'IP Packet' expands, if WEP enabled, to <--'
 *        |
 *        V
 *       ,-----------------------.
 * Bytes |  4  |   0-2296  |  4  |
 *       |-----|-----------|-----|
 * Desc. | IV  | Encrypted | ICV |
 *       |     | IP Packet |     |
 *       `-----------------------'
 * Total: 8 non-data bytes
 *
 *
 * 802.3 Ethernet Data Frame
 *
 *       ,-----------------------------------------.
 * Bytes |   6   |   6   |  2   |  Variable |   4  |
 *       |-------|-------|------|-----------|------|
 * Desc. | Dest. | Source| Type | IP Packet |  fcs |
 *       |  MAC  |  MAC  |      |           |      |
 *       `-----------------------------------------'
 * Total: 18 non-data bytes
 *
 * In the event that fragmentation is required, the incoming payload is split
 * into N parts of size ieee->fts.  The first fragment contains the SNAP header
 * and the remaining packets are just data.
 *
 * If encryption is enabled, each fragment payload size is reduced by enough
 * space to add the prefix and postfix (IV and ICV totalling 8 bytes in
 * the case of WEP).  So if you have 1500 bytes of payload with ieee->fts set
 * to 500, without encryption it will take 3 frames.  With WEP it will take
 * 4 frames, as the payload of each frame is reduced to 492 bytes.
 *
 * SKB visualization
 *
 * ,- skb->data
 * |
 * |    ETHERNET HEADER        ,-<-- PAYLOAD
 * |                           |     14 bytes from skb->data
 * |  2 bytes for Type --> ,T. |     (sizeof ethhdr)
 * |                       | | |
 * |,-Dest.--. ,--Src.---. | | |
 * |  6 bytes| | 6 bytes | | | |
 * v         | |         | | | |
 * 0         | v       1 | v | v           2
 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
 *     ^     | ^         | ^ |
 *     |     | |         | | |
 *     |     | |         | `T' <---- 2 bytes for Type
 *     |     | |         |
 *     |     | '---SNAP--' <-------- 6 bytes for SNAP
 *     |     |
 *     `-IV--' <-------------------- 4 bytes for IV (WEP)
 *
 *      SNAP HEADER
 *
 */

static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };

static int rtllib_put_snap(u8 *data, u16 h_proto)
{
	struct rtllib_snap_hdr *snap;
	u8 *oui;

	snap = (struct rtllib_snap_hdr *)data;
	snap->dsap = 0xaa;
	snap->ssap = 0xaa;
	snap->ctrl = 0x03;

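	/* IPX (0x8137) and AppleTalk AARP (0x80f3) use the 802.1H (bridge
	 * tunnel) OUI; everything else gets the RFC 1042 encapsulation.
	 */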
	if (h_proto == 0x8137 || h_proto == 0x80f3)
		oui = P802_1H_OUI;
	else
		oui = RFC1042_OUI;
	snap->oui[0] = oui[0];
	snap->oui[1] = oui[1];
	snap->oui[2] = oui[2];

	*(__be16 *)(data + SNAP_SIZE) = htons(h_proto);

	return SNAP_SIZE + sizeof(u16);
}

int rtllib_encrypt_fragment(struct rtllib_device *ieee, struct sk_buff *frag,
			    int hdr_len)
{
	struct lib80211_crypt_data *crypt = NULL;
	int res;

	crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];

	if (!(crypt && crypt->ops)) {
		netdev_info(ieee->dev, "=========>%s(), crypt is null\n",
			    __func__);
		return -1;
	}
	/* To encrypt, frame format is:
	 * IV (4 bytes), clear payload (including SNAP), ICV (4 bytes)
	 */

	/* Host-based IEEE 802.11 fragmentation for TX is not yet supported, so
	 * call both MSDU and MPDU encryption functions from here.
	 */
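	/* Hold a reference so the crypt ops cannot be torn down while in use. */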
	atomic_inc(&crypt->refcnt);
	res = 0;
	if (crypt->ops->encrypt_msdu)
		res = crypt->ops->encrypt_msdu(frag, hdr_len, crypt->priv);
	if (res == 0 && crypt->ops->encrypt_mpdu)
		res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv);

	atomic_dec(&crypt->refcnt);
	if (res < 0) {
		netdev_info(ieee->dev, "%s: Encryption failed: len=%d.\n",
			    ieee->dev->name, frag->len);
		return -1;
	}

	return 0;
}

void rtllib_txb_free(struct rtllib_txb *txb)
{
	if (unlikely(!txb))
		return;
	kfree(txb);
}

static struct rtllib_txb *rtllib_alloc_txb(int nr_frags, int txb_size,
					   gfp_t gfp_mask)
{
	struct rtllib_txb *txb;
	int i;

	txb = kzalloc(struct_size(txb, fragments, nr_frags), gfp_mask);
	if (!txb)
		return NULL;

	txb->nr_frags = nr_frags;
	txb->frag_size = cpu_to_le16(txb_size);

	for (i = 0; i < nr_frags; i++) {
		txb->fragments[i] = dev_alloc_skb(txb_size);
		if (unlikely(!txb->fragments[i]))
			goto err_free;
		memset(txb->fragments[i]->cb, 0, sizeof(txb->fragments[i]->cb));
	}

	return txb;

err_free:
	while (--i >= 0)
		dev_kfree_skb_any(txb->fragments[i]);
	kfree(txb);

	return NULL;
}

static int rtllib_classify(struct sk_buff *skb)
{
	struct ethhdr *eth;
	struct iphdr *ip;

	eth = (struct ethhdr *)skb->data;
	if (eth->h_proto != htons(ETH_P_IP))
		return 0;

#ifdef VERBOSE_DEBUG
	print_hex_dump_bytes("rtllib_classify: ", DUMP_PREFIX_NONE, skb->data,
			     skb->len);
#endif
	ip = ip_hdr(skb);
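	/* Map the IP TOS/DSCP precedence bits to an 802.1d user priority;
	 * unmarked traffic stays at best effort (0).
	 */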
	switch (ip->tos & 0xfc) {
	case 0x20:
		return 2;
	case 0x40:
		return 1;
	case 0x60:
		return 3;
	case 0x80:
		return 4;
	case 0xa0:
		return 5;
	case 0xc0:
		return 6;
	case 0xe0:
		return 7;
	default:
		return 0;
	}
}

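/*
 * Decide whether this frame may be sent as part of an A-MPDU aggregate.
 * If aggregation is allowed but no block-ack session exists yet for the
 * TID, kick off the ADDBA handshake instead.
 */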
static void rtllib_tx_query_agg_cap(struct rtllib_device *ieee,
				    struct sk_buff *skb,
				    struct cb_desc *tcb_desc)
{
	struct rt_hi_throughput *ht_info = ieee->ht_info;
	struct tx_ts_record *ts = NULL;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (rtllib_act_scanning(ieee, false))
		return;

	if (!ht_info->current_ht_support || !ht_info->enable_ht)
		return;
	if (!is_qos_data_frame(skb->data))
		return;
	if (is_multicast_ether_addr(hdr->addr1))
		return;

	if (tcb_desc->bdhcp || ieee->cnt_after_link < 2)
		return;

	if (ht_info->iot_action & HT_IOT_ACT_TX_NO_AGGREGATION)
		return;

	if (!ieee->get_nmode_support_by_sec_cfg(ieee->dev))
		return;
	if (ht_info->current_ampdu_enable) {
		if (!rtllib_get_ts(ieee, (struct ts_common_info **)(&ts), hdr->addr1,
				   skb->priority, TX_DIR, true)) {
			netdev_info(ieee->dev, "%s: can't get TS\n", __func__);
			return;
		}
		if (!ts->tx_admitted_ba_record.b_valid) {
			if (ieee->wpa_ie_len && (ieee->pairwise_key_type ==
			    KEY_TYPE_NA)) {
				;
			} else if (tcb_desc->bdhcp == 1) {
				;
			} else if (!ts->disable_add_ba) {
				rtllib_ts_start_add_ba_process(ieee, ts);
			}
			return;
		} else if (!ts->using_ba) {
			if (SN_LESS(ts->tx_admitted_ba_record.ba_start_seq_ctrl.field.seq_num,
				    (ts->tx_cur_seq + 1) % 4096))
				ts->using_ba = true;
			else
				return;
		}
		if (ieee->iw_mode == IW_MODE_INFRA) {
			tcb_desc->ampdu_enable = true;
			tcb_desc->ampdu_factor = ht_info->current_ampdu_factor;
			tcb_desc->ampdu_density = ht_info->current_mpdu_density;
		}
	}
}

static void rtllib_query_short_preamble_mode(struct rtllib_device *ieee,
					     struct cb_desc *tcb_desc)
{
	tcb_desc->use_short_preamble = false;
	if (tcb_desc->data_rate == 2)
		return;
	else if (ieee->current_network.capability &
		 WLAN_CAPABILITY_SHORT_PREAMBLE)
		tcb_desc->use_short_preamble = true;
}

static void rtllib_query_ht_cap_short_gi(struct rtllib_device *ieee,
					 struct cb_desc *tcb_desc)
{
	struct rt_hi_throughput *ht_info = ieee->ht_info;

	tcb_desc->use_short_gi		= false;

	if (!ht_info->current_ht_support || !ht_info->enable_ht)
		return;

	if (ht_info->cur_bw_40mhz && ht_info->cur_short_gi_40mhz)
		tcb_desc->use_short_gi = true;
	else if (!ht_info->cur_bw_40mhz && ht_info->cur_short_gi_20mhz)
		tcb_desc->use_short_gi = true;
}

static void rtllib_query_bandwidth_mode(struct rtllib_device *ieee,
					struct cb_desc *tcb_desc)
{
	struct rt_hi_throughput *ht_info = ieee->ht_info;

	tcb_desc->packet_bw = false;

	if (!ht_info->current_ht_support || !ht_info->enable_ht)
		return;

	if (tcb_desc->multicast || tcb_desc->broadcast)
		return;

	if ((tcb_desc->data_rate & 0x80) == 0)
		return;
	if (ht_info->cur_bw_40mhz && ht_info->cur_tx_bw40mhz &&
	    !ieee->bandwidth_auto_switch.forced_tx_20MHz)
		tcb_desc->packet_bw = true;
}

static void rtllib_query_protectionmode(struct rtllib_device *ieee,
					struct cb_desc *tcb_desc,
					struct sk_buff *skb)
{
	struct rt_hi_throughput *ht_info;

	tcb_desc->rtsstbc			= false;
	tcb_desc->rts_use_short_gi		= false;
	tcb_desc->cts_enable			= false;
	tcb_desc->RTSSC				= 0;
	tcb_desc->rts_bw			= false;

	if (tcb_desc->broadcast || tcb_desc->multicast)
		return;

	if (is_broadcast_ether_addr(skb->data + 16))
		return;

	if (ieee->mode < WIRELESS_MODE_N_24G) {
		if (skb->len > ieee->rts) {
			tcb_desc->rts_enable = true;
			tcb_desc->rts_rate = MGN_24M;
		} else if (ieee->current_network.buseprotection) {
			tcb_desc->rts_enable = true;
			tcb_desc->cts_enable = true;
			tcb_desc->rts_rate = MGN_24M;
		}
		return;
	}

	ht_info = ieee->ht_info;

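	/* Walk the protection rules in order; the first rule that matches
	 * decides the RTS/CTS configuration for this frame.
	 */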
	while (true) {
		if (ht_info->iot_action & HT_IOT_ACT_FORCED_CTS2SELF) {
			tcb_desc->cts_enable	= true;
			tcb_desc->rts_rate = MGN_24M;
			tcb_desc->rts_enable = true;
			break;
		} else if (ht_info->iot_action & (HT_IOT_ACT_FORCED_RTS |
			   HT_IOT_ACT_PURE_N_MODE)) {
			tcb_desc->rts_enable = true;
			tcb_desc->rts_rate = MGN_24M;
			break;
		}
		if (ieee->current_network.buseprotection) {
			tcb_desc->rts_enable = true;
			tcb_desc->cts_enable = true;
			tcb_desc->rts_rate = MGN_24M;
			break;
		}
		if (ht_info->current_ht_support && ht_info->enable_ht) {
			u8 ht_op_mode = ht_info->current_op_mode;

			if ((ht_info->cur_bw_40mhz && (ht_op_mode == 2 ||
						       ht_op_mode == 3)) ||
			     (!ht_info->cur_bw_40mhz && ht_op_mode == 3)) {
				tcb_desc->rts_rate = MGN_24M;
				tcb_desc->rts_enable = true;
				break;
			}
		}
		if (skb->len > ieee->rts) {
			tcb_desc->rts_rate = MGN_24M;
			tcb_desc->rts_enable = true;
			break;
		}
		if (tcb_desc->ampdu_enable) {
			tcb_desc->rts_rate = MGN_24M;
			tcb_desc->rts_enable = false;
			break;
		}
		goto NO_PROTECTION;
	}
	if (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
		tcb_desc->use_short_preamble = true;
	return;
NO_PROTECTION:
	tcb_desc->rts_enable	= false;
	tcb_desc->cts_enable	= false;
	tcb_desc->rts_rate	= 0;
	tcb_desc->RTSSC		= 0;
	tcb_desc->rts_bw	= false;
}

static void rtllib_txrate_selectmode(struct rtllib_device *ieee,
				     struct cb_desc *tcb_desc)
{
	if (ieee->tx_dis_rate_fallback)
		tcb_desc->tx_dis_rate_fallback = true;

	if (ieee->tx_use_drv_assinged_rate)
		tcb_desc->tx_use_drv_assinged_rate = true;
	if (!tcb_desc->tx_dis_rate_fallback ||
	    !tcb_desc->tx_use_drv_assinged_rate) {
		if (ieee->iw_mode == IW_MODE_INFRA)
			tcb_desc->ratr_index = 0;
	}
}

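/*
 * Return the TX sequence number for a QoS unicast frame.  The per-TID
 * counter lives in the TS record and wraps at 4096; non-QoS and
 * multicast frames use the shared counter maintained by the caller.
 */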
static u16 rtllib_query_seqnum(struct rtllib_device *ieee, struct sk_buff *skb,
			       u8 *dst)
{
	u16 seqnum = 0;

	if (is_multicast_ether_addr(dst))
		return 0;
	if (is_qos_data_frame(skb->data)) {
		struct tx_ts_record *ts = NULL;

		if (!rtllib_get_ts(ieee, (struct ts_common_info **)(&ts), dst,
				   skb->priority, TX_DIR, true))
			return 0;
		seqnum = ts->tx_cur_seq;
		ts->tx_cur_seq = (ts->tx_cur_seq + 1) % 4096;
		return seqnum;
	}
	return 0;
}

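/*
 * WMM admission control: when the AP mandates ACM for an access category
 * we have no admitted TSPEC for, drop the frame's priority to the next
 * lower category (VO -> VI -> BE -> BK).
 */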
static int wme_downgrade_ac(struct sk_buff *skb)
{
	switch (skb->priority) {
	case 6:
	case 7:
		skb->priority = 5; /* VO -> VI */
		return 0;
	case 4:
	case 5:
		skb->priority = 3; /* VI -> BE */
		return 0;
	case 0:
	case 3:
		skb->priority = 1; /* BE -> BK */
		return 0;
	default:
		return -1;
	}
}

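/* Current TX data rate: the legacy rate in non-HT modes, otherwise the
 * HT operational rate when one has been negotiated.
 */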
static u8 rtllib_current_rate(struct rtllib_device *ieee)
{
	if (ieee->mode & IEEE_MODE_MASK)
		return ieee->rate;

	if (ieee->ht_curr_op_rate)
		return ieee->ht_curr_op_rate;
	else
		return ieee->rate & 0x7F;
}

static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
{
	struct rtllib_device *ieee = (struct rtllib_device *)
				     netdev_priv_rsl(dev);
	struct rtllib_txb *txb = NULL;
	struct ieee80211_qos_hdr *frag_hdr;
	int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size;
	unsigned long flags;
	struct net_device_stats *stats = &ieee->stats;
	int ether_type = 0, encrypt;
	int bytes, fc, qos_ctl = 0, hdr_len;
	struct sk_buff *skb_frag;
	struct ieee80211_qos_hdr header = { /* Ensure zero initialized */
		.duration_id = 0,
		.seq_ctrl = 0,
		.qos_ctrl = 0
	};
	int qos_activated = ieee->current_network.qos_data.active;
	u8 dest[ETH_ALEN];
	u8 src[ETH_ALEN];
	struct lib80211_crypt_data *crypt = NULL;
	struct cb_desc *tcb_desc;
	u8 is_multicast = false;
	bool bdhcp = false;

	spin_lock_irqsave(&ieee->lock, flags);

	/* If there is no driver handler to take the TXB, don't bother
	 * creating it...
	 */
	if (!(ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE) ||
	    ((!ieee->softmac_data_hard_start_xmit &&
	     (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)))) {
		netdev_warn(ieee->dev, "No xmit handler.\n");
		goto success;
	}

	if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
		netdev_warn(ieee->dev, "skb too small (%d).\n",
			    skb->len);
		goto success;
	}
	/* Save source and destination addresses */
	ether_addr_copy(dest, skb->data);
	ether_addr_copy(src, skb->data + ETH_ALEN);

	memset(skb->cb, 0, sizeof(skb->cb));
	ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto);

	if (ieee->iw_mode == IW_MODE_MONITOR) {
		txb = rtllib_alloc_txb(1, skb->len, GFP_ATOMIC);
		if (unlikely(!txb)) {
			netdev_warn(ieee->dev,
				    "Could not allocate TXB\n");
			goto failed;
		}

		txb->encrypted = 0;
		txb->payload_size = cpu_to_le16(skb->len);
		skb_put_data(txb->fragments[0], skb->data, skb->len);

		goto success;
	}

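	/* Sniff outgoing DHCP (UDP ports 67/68) and ARP frames so they can be
	 * sent at a robust low rate later and so power-save entry is delayed
	 * while address configuration completes.  282 is presumably the
	 * minimum length of a full DHCP frame.
	 */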
	if (skb->len > 282) {
		if (ether_type == ETH_P_IP) {
			const struct iphdr *ip = (struct iphdr *)
				((u8 *)skb->data + 14);
			if (ip->protocol == IPPROTO_UDP) {
				struct udphdr *udp;

				udp = (struct udphdr *)((u8 *)ip +
				      (ip->ihl << 2));
				if (((((u8 *)udp)[1] == 68) &&
				     (((u8 *)udp)[3] == 67)) ||
				   ((((u8 *)udp)[1] == 67) &&
				   (((u8 *)udp)[3] == 68))) {
					bdhcp = true;
					ieee->lps_delay_cnt = 200;
				}
			}
		} else if (ether_type == ETH_P_ARP) {
			netdev_info(ieee->dev,
				    "=================>DHCP Protocol start tx ARP pkt!!\n");
			bdhcp = true;
			ieee->lps_delay_cnt =
				 ieee->current_network.tim.tim_count;
		}
	}

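	/* Classify to an 802.1d priority and decide whether this frame must be
	 * encrypted.  EAPOL frames are sent in the clear while 802.1X is still
	 * negotiating the keys.
	 */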
	skb->priority = rtllib_classify(skb);
	crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
	encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) && crypt && crypt->ops;
	if (!encrypt && ieee->ieee802_1x &&
	    ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
		stats->tx_dropped++;
		goto success;
	}
	if (crypt && !encrypt && ether_type == ETH_P_PAE) {
		struct eapol *eap = (struct eapol *)(skb->data +
			sizeof(struct ethhdr) - SNAP_SIZE -
			sizeof(u16));
		netdev_dbg(ieee->dev,
			   "TX: IEEE 802.11 EAPOL frame: %s\n",
			   eap_get_type(eap->type));
	}

	/* Advance the SKB to the start of the payload */
	skb_pull(skb, sizeof(struct ethhdr));

	/* Determine total amount of storage required for TXB packets */
	bytes = skb->len + SNAP_SIZE + sizeof(u16);

	if (encrypt)
		fc = RTLLIB_FTYPE_DATA | IEEE80211_FCTL_PROTECTED;
	else
		fc = RTLLIB_FTYPE_DATA;

	if (qos_activated)
		fc |= IEEE80211_STYPE_QOS_DATA;
	else
		fc |= IEEE80211_STYPE_DATA;

	if (ieee->iw_mode == IW_MODE_INFRA) {
		fc |= IEEE80211_FCTL_TODS;
		/* To DS: Addr1 = BSSID, Addr2 = SA,
		 * Addr3 = DA
		 */
		ether_addr_copy(header.addr1,
				ieee->current_network.bssid);
		ether_addr_copy(header.addr2, src);
		ether_addr_copy(header.addr3, dest);
	}

	is_multicast = is_multicast_ether_addr(header.addr1);

	header.frame_control = cpu_to_le16(fc);

	/* Determine fragmentation size based on destination (multicast
	 * and broadcast are not fragmented)
	 */
	if (is_multicast) {
		frag_size = MAX_FRAG_THRESHOLD;
		qos_ctl |= QOS_CTL_NOTCONTAIN_ACK;
	} else {
		frag_size = ieee->fts;
		qos_ctl = 0;
	}

	if (qos_activated) {
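		/* QoS data frames carry a 2-byte QoS Control field right after
		 * the standard 3-address header.
		 */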
		hdr_len = RTLLIB_3ADDR_LEN + 2;

		/* In case we are a client, verify ACM is not set for this AC */
		while (unlikely(ieee->wmm_acm & (0x01 << skb->priority))) {
			netdev_info(ieee->dev, "skb->priority = %x\n",
				    skb->priority);
			if (wme_downgrade_ac(skb))
				break;
			netdev_info(ieee->dev, "converted skb->priority = %x\n",
				    skb->priority);
		}

		qos_ctl |= skb->priority;
		header.qos_ctrl = cpu_to_le16(qos_ctl & RTLLIB_QOS_TID);

	} else {
		hdr_len = RTLLIB_3ADDR_LEN;
	}
	/* Determine amount of payload per fragment.  Regardless of whether
	 * this stack is providing the full 802.11 header, one will
	 * eventually be affixed to this fragment -- so we must account
	 * for it when determining the amount of payload space.
	 */
	bytes_per_frag = frag_size - hdr_len;
	if (ieee->config &
	   (CFG_RTLLIB_COMPUTE_FCS | CFG_RTLLIB_RESERVE_FCS))
		bytes_per_frag -= RTLLIB_FCS_LEN;

	/* Each fragment may need to have room for encrypting
	 * pre/postfix
	 */
	if (encrypt) {
		bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len +
			crypt->ops->extra_mpdu_postfix_len +
			crypt->ops->extra_msdu_prefix_len +
			crypt->ops->extra_msdu_postfix_len;
	}
	/* Number of fragments is the total payload bytes divided by the
	 * payload per fragment, rounded up
	 */
	nr_frags = bytes / bytes_per_frag;
	bytes_last_frag = bytes % bytes_per_frag;
	if (bytes_last_frag)
		nr_frags++;
	else
		bytes_last_frag = bytes_per_frag;

	/* When we allocate the TXB we allocate enough space for the
	 * reserve and full fragment bytes (bytes_per_frag doesn't
	 * include prefix, postfix, header, FCS, etc.)
	 */
	txb = rtllib_alloc_txb(nr_frags, frag_size +
			       ieee->tx_headroom, GFP_ATOMIC);
	if (unlikely(!txb)) {
		netdev_warn(ieee->dev, "Could not allocate TXB\n");
		goto failed;
	}
	txb->encrypted = encrypt;
	txb->payload_size = cpu_to_le16(bytes);

	if (qos_activated)
		txb->queue_index = UP2AC(skb->priority);
	else
		txb->queue_index = WME_AC_BE;

	for (i = 0; i < nr_frags; i++) {
		skb_frag = txb->fragments[i];
		tcb_desc = (struct cb_desc *)(skb_frag->cb +
			    MAX_DEV_ADDR_SIZE);
		if (qos_activated) {
			skb_frag->priority = skb->priority;
			tcb_desc->queue_index = UP2AC(skb->priority);
		} else {
			skb_frag->priority = WME_AC_BE;
			tcb_desc->queue_index = WME_AC_BE;
		}
		skb_reserve(skb_frag, ieee->tx_headroom);

		if (encrypt) {
			if (ieee->hwsec_active)
				tcb_desc->hw_sec = 1;
			else
				tcb_desc->hw_sec = 0;
			skb_reserve(skb_frag,
				    crypt->ops->extra_mpdu_prefix_len +
				    crypt->ops->extra_msdu_prefix_len);
		} else {
			tcb_desc->hw_sec = 0;
		}
		frag_hdr = skb_put_data(skb_frag, &header, hdr_len);

		/* If this is not the last fragment, then add the
		 * MOREFRAGS bit to the frame control
		 */
		if (i != nr_frags - 1) {
			frag_hdr->frame_control = cpu_to_le16(fc |
							  IEEE80211_FCTL_MOREFRAGS);
			bytes = bytes_per_frag;

		} else {
			/* The last fragment has the remaining length */
			bytes = bytes_last_frag;
		}
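		/* The 802.11 Sequence Control field: sequence number in the
		 * upper 12 bits, fragment number in the low 4 bits.
		 */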
		if ((qos_activated) && (!is_multicast)) {
			frag_hdr->seq_ctrl =
				 cpu_to_le16(rtllib_query_seqnum(ieee, skb_frag,
								 header.addr1));
			frag_hdr->seq_ctrl =
				 cpu_to_le16(le16_to_cpu(frag_hdr->seq_ctrl) << 4 | i);
		} else {
			frag_hdr->seq_ctrl =
				 cpu_to_le16(ieee->seq_ctrl[0] << 4 | i);
		}
		/* Put a SNAP header on the first fragment */
		if (i == 0) {
			rtllib_put_snap(skb_put(skb_frag,
						SNAP_SIZE +
						sizeof(u16)), ether_type);
			bytes -= SNAP_SIZE + sizeof(u16);
		}

		skb_put_data(skb_frag, skb->data, bytes);

		/* Advance the SKB... */
		skb_pull(skb, bytes);

		/* Encryption routine will move the header forward in
		 * order to insert the IV between the header and the
		 * payload
		 */
		if (encrypt)
			rtllib_encrypt_fragment(ieee, skb_frag,
						hdr_len);
		if (ieee->config &
		   (CFG_RTLLIB_COMPUTE_FCS | CFG_RTLLIB_RESERVE_FCS))
			skb_put(skb_frag, 4);
	}

	if ((qos_activated) && (!is_multicast)) {
		if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF)
			ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0;
		else
			ieee->seq_ctrl[UP2AC(skb->priority) + 1]++;
	} else {
		if (ieee->seq_ctrl[0] == 0xFFF)
			ieee->seq_ctrl[0] = 0;
		else
			ieee->seq_ctrl[0]++;
	}

 success:
	if (txb) {
		tcb_desc = (struct cb_desc *)
				(txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE);
		tcb_desc->tx_enable_fw_calc_dur = 1;
		tcb_desc->priority = skb->priority;

		if (ether_type == ETH_P_PAE) {
			if (ieee->ht_info->iot_action &
			    HT_IOT_ACT_WA_IOT_Broadcom) {
				tcb_desc->data_rate =
					 mgnt_query_tx_rate_exclude_cck_rates(ieee);
				tcb_desc->tx_dis_rate_fallback = false;
			} else {
				tcb_desc->data_rate = ieee->basic_rate;
				tcb_desc->tx_dis_rate_fallback = 1;
			}

			tcb_desc->ratr_index = 7;
			tcb_desc->tx_use_drv_assinged_rate = 1;
		} else {
			if (is_multicast_ether_addr(header.addr1))
				tcb_desc->multicast = 1;
			if (is_broadcast_ether_addr(header.addr1))
				tcb_desc->broadcast = 1;
			rtllib_txrate_selectmode(ieee, tcb_desc);
			if (tcb_desc->multicast || tcb_desc->broadcast)
				tcb_desc->data_rate = ieee->basic_rate;
			else
				tcb_desc->data_rate = rtllib_current_rate(ieee);

			if (bdhcp) {
				if (ieee->ht_info->iot_action &
				    HT_IOT_ACT_WA_IOT_Broadcom) {
					tcb_desc->data_rate =
					   mgnt_query_tx_rate_exclude_cck_rates(ieee);
					tcb_desc->tx_dis_rate_fallback = false;
				} else {
					tcb_desc->data_rate = MGN_1M;
					tcb_desc->tx_dis_rate_fallback = 1;
				}

				tcb_desc->ratr_index = 7;
				tcb_desc->tx_use_drv_assinged_rate = 1;
				tcb_desc->bdhcp = 1;
			}

			rtllib_query_short_preamble_mode(ieee, tcb_desc);
			rtllib_tx_query_agg_cap(ieee, txb->fragments[0],
						tcb_desc);
			rtllib_query_ht_cap_short_gi(ieee, tcb_desc);
			rtllib_query_bandwidth_mode(ieee, tcb_desc);
			rtllib_query_protectionmode(ieee, tcb_desc,
						    txb->fragments[0]);
		}
	}
	spin_unlock_irqrestore(&ieee->lock, flags);
	dev_kfree_skb_any(skb);
	if (txb) {
		if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += le16_to_cpu(txb->payload_size);
			rtllib_softmac_xmit(txb, ieee);
		} else {
			rtllib_txb_free(txb);
		}
	}

	return 0;

 failed:
	spin_unlock_irqrestore(&ieee->lock, flags);
	netif_stop_queue(dev);
	stats->tx_errors++;
	return 1;
}

netdev_tx_t rtllib_xmit(struct sk_buff *skb, struct net_device *dev)
{
	memset(skb->cb, 0, sizeof(skb->cb));
	return rtllib_xmit_inter(skb, dev) ? NETDEV_TX_BUSY : NETDEV_TX_OK;
}
EXPORT_SYMBOL(rtllib_xmit);