Lines Matching +full:queue +full:- +full:pkt +full:- +full:tx
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
3 * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
5 * Copyright (C) 2016-2017 Intel Deutschland GmbH
14 #include "iwl-trans.h"
15 #include "iwl-nvm-utils.h"
18 #include "time-sync.h"
27 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_BA); in iwl_mvm_bar_check_trigger()
31 ba_trig = (void *)trig->data; in iwl_mvm_bar_check_trigger()
33 if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid))) in iwl_mvm_bar_check_trigger()
36 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, in iwl_mvm_bar_check_trigger()
48 struct ieee80211_hdr *hdr = (void *)skb->data; in iwl_mvm_tx_csum()
49 u16 mh_len = ieee80211_hdrlen(hdr->frame_control); in iwl_mvm_tx_csum()
55 if (skb->ip_summed != CHECKSUM_PARTIAL) in iwl_mvm_tx_csum()
59 if (WARN_ONCE(!(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) || in iwl_mvm_tx_csum()
60 (skb->protocol != htons(ETH_P_IP) && in iwl_mvm_tx_csum()
61 skb->protocol != htons(ETH_P_IPV6)), in iwl_mvm_tx_csum()
67 if (skb->protocol == htons(ETH_P_IP)) { in iwl_mvm_tx_csum()
68 protocol = ip_hdr(skb)->protocol; in iwl_mvm_tx_csum()
75 protocol = ipv6h->nexthdr; in iwl_mvm_tx_csum()
88 protocol = hp->nexthdr; in iwl_mvm_tx_csum()
91 /* if we get here, the protocol should now be TCP or UDP */ in iwl_mvm_tx_csum()
112 if (skb->protocol == htons(ETH_P_IP) && amsdu) { in iwl_mvm_tx_csum()
113 ip_hdr(skb)->check = 0; in iwl_mvm_tx_csum()
119 tcp_hdr(skb)->check = 0; in iwl_mvm_tx_csum()
121 udp_hdr(skb)->check = 0; in iwl_mvm_tx_csum()
128 * In new Tx API, the IV is always added by the firmware. in iwl_mvm_tx_csum()
130 if (!iwl_mvm_has_new_tx_api(mvm) && info->control.hw_key && in iwl_mvm_tx_csum()
131 info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP40 && in iwl_mvm_tx_csum()
132 info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP104) in iwl_mvm_tx_csum()
133 mh_len += info->control.hw_key->iv_len; in iwl_mvm_tx_csum()
139 else if (ieee80211_hdrlen(hdr->frame_control) % 4) in iwl_mvm_tx_csum()
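/*
 * Editor's illustration (not driver source): the pad handling is needed
 * whenever the MAC header length is not a multiple of 4. A QoS data
 * header, for instance, is 24 + 2 = 26 bytes, and 26 % 4 == 2, so two
 * pad bytes sit between the header and the payload, which the (elided)
 * offload_assist bit signals to the checksum hardware.
 */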
147 * Sets most of the Tx cmd's fields
153 struct ieee80211_hdr *hdr = (void *)skb->data; in iwl_mvm_set_tx_cmd()
154 __le16 fc = hdr->frame_control; in iwl_mvm_set_tx_cmd()
155 u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags); in iwl_mvm_set_tx_cmd()
156 u32 len = skb->len + FCS_LEN; in iwl_mvm_set_tx_cmd()
160 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) || in iwl_mvm_set_tx_cmd()
162 !is_multicast_ether_addr(hdr->addr1))) in iwl_mvm_set_tx_cmd()
175 tx_cmd->tid_tspec = qc[0] & 0xf; in iwl_mvm_set_tx_cmd()
179 struct ieee80211_bar *bar = (void *)skb->data; in iwl_mvm_set_tx_cmd()
180 u16 control = le16_to_cpu(bar->control); in iwl_mvm_set_tx_cmd()
181 u16 ssn = le16_to_cpu(bar->start_seq_num); in iwl_mvm_set_tx_cmd()
184 tx_cmd->tid_tspec = (control & in iwl_mvm_set_tx_cmd()
187 WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT); in iwl_mvm_set_tx_cmd()
188 iwl_mvm_bar_check_trigger(mvm, bar->ra, tx_cmd->tid_tspec, in iwl_mvm_set_tx_cmd()
192 tx_cmd->tid_tspec = IWL_TID_NON_QOS; in iwl_mvm_set_tx_cmd()
194 tx_cmd->tid_tspec = IWL_MAX_TID_COUNT; in iwl_mvm_set_tx_cmd()
196 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) in iwl_mvm_set_tx_cmd()
203 if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT) in iwl_mvm_set_tx_cmd()
204 ac = tid_to_mac80211_ac[tx_cmd->tid_tspec]; in iwl_mvm_set_tx_cmd()
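/*
 * Illustrative sketch (editor's example, not driver source) of the
 * TID-to-AC mapping used above; it mirrors mac80211's
 * tid_to_mac80211_ac[] table (802.1D UP -> WMM AC):
 *
 *	TID 0 -> AC_BE    TID 4 -> AC_VI
 *	TID 1 -> AC_BK    TID 5 -> AC_VI
 *	TID 2 -> AC_BK    TID 6 -> AC_VO
 *	TID 3 -> AC_BE    TID 7 -> AC_VO
 */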
213 tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_ASSOC); in iwl_mvm_set_tx_cmd()
215 tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE); in iwl_mvm_set_tx_cmd()
217 tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT); in iwl_mvm_set_tx_cmd()
219 /* The spec allows Action frames in A-MPDU, but we don't support in iwl_mvm_set_tx_cmd()
222 WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU); in iwl_mvm_set_tx_cmd()
223 } else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) { in iwl_mvm_set_tx_cmd()
224 tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT); in iwl_mvm_set_tx_cmd()
226 tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE); in iwl_mvm_set_tx_cmd()
229 if (ieee80211_is_data(fc) && len > mvm->rts_threshold && in iwl_mvm_set_tx_cmd()
230 !is_multicast_ether_addr(hdr->addr1)) in iwl_mvm_set_tx_cmd()
233 if (fw_has_capa(&mvm->fw->ucode_capa, in iwl_mvm_set_tx_cmd()
238 tx_cmd->tx_flags = cpu_to_le32(tx_flags); in iwl_mvm_set_tx_cmd()
239 /* Total # bytes to be transmitted - PCIe code will adjust for A-MSDU */ in iwl_mvm_set_tx_cmd()
240 tx_cmd->len = cpu_to_le16((u16)skb->len); in iwl_mvm_set_tx_cmd()
241 tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); in iwl_mvm_set_tx_cmd()
242 tx_cmd->sta_id = sta_id; in iwl_mvm_set_tx_cmd()
244 tx_cmd->offload_assist = in iwl_mvm_set_tx_cmd()
252 if (info->band == NL80211_BAND_2GHZ && in iwl_mvm_get_tx_ant()
254 return mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS; in iwl_mvm_get_tx_ant()
259 return BIT(mvmsta->tx_ant) << RATE_MCS_ANT_POS; in iwl_mvm_get_tx_ant()
262 return BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS; in iwl_mvm_get_tx_ant()
277 info->control.vif); in iwl_mvm_convert_rate_idx()
279 /* Get PLCP rate for tx_cmd->rate_n_flags */ in iwl_mvm_convert_rate_idx()
280 rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(mvm->fw, rate_idx); in iwl_mvm_convert_rate_idx()
285 if (iwl_fw_lookup_cmd_ver(mvm->fw, TX_CMD, 0) > 8) { in iwl_mvm_convert_rate_idx()
302 struct ieee80211_tx_rate *rate = &info->control.rates[0]; in iwl_mvm_get_inject_tx_rate()
310 if (rate->flags & IEEE80211_TX_RC_VHT_MCS) { in iwl_mvm_get_inject_tx_rate()
317 if (rate->flags & IEEE80211_TX_RC_SHORT_GI) in iwl_mvm_get_inject_tx_rate()
319 if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) in iwl_mvm_get_inject_tx_rate()
321 else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH) in iwl_mvm_get_inject_tx_rate()
323 else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH) in iwl_mvm_get_inject_tx_rate()
326 if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TX_CMD, 0) > 6) in iwl_mvm_get_inject_tx_rate()
328 } else if (rate->flags & IEEE80211_TX_RC_MCS) { in iwl_mvm_get_inject_tx_rate()
330 result |= u32_encode_bits(rate->idx, in iwl_mvm_get_inject_tx_rate()
333 if (rate->flags & IEEE80211_TX_RC_SHORT_GI) in iwl_mvm_get_inject_tx_rate()
335 if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) in iwl_mvm_get_inject_tx_rate()
337 if (info->flags & IEEE80211_TX_CTL_LDPC) in iwl_mvm_get_inject_tx_rate()
339 if (u32_get_bits(info->flags, IEEE80211_TX_CTL_STBC)) in iwl_mvm_get_inject_tx_rate()
342 if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TX_CMD, 0) > 6) in iwl_mvm_get_inject_tx_rate()
345 int rate_idx = info->control.rates[0].idx; in iwl_mvm_get_inject_tx_rate()
350 if (info->control.antennas) in iwl_mvm_get_inject_tx_rate()
351 result |= u32_encode_bits(info->control.antennas, in iwl_mvm_get_inject_tx_rate()
363 int rate_idx = -1; in iwl_mvm_get_tx_rate()
365 if (!ieee80211_hw_check(mvm->hw, HAS_RATE_CONTROL)) { in iwl_mvm_get_tx_rate()
366 /* info->control is only relevant for non-HW rate control */ in iwl_mvm_get_tx_rate()
369 WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS && in iwl_mvm_get_tx_rate()
372 info->control.rates[0].flags, in iwl_mvm_get_tx_rate()
373 info->control.rates[0].idx, in iwl_mvm_get_tx_rate()
375 sta ? iwl_mvm_sta_from_mac80211(sta)->sta_state : -1); in iwl_mvm_get_tx_rate()
377 rate_idx = info->control.rates[0].idx; in iwl_mvm_get_tx_rate()
382 if (info->band != NL80211_BAND_2GHZ || in iwl_mvm_get_tx_rate()
383 (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)) in iwl_mvm_get_tx_rate()
397 if (unlikely(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)) in iwl_mvm_get_tx_rate_n_flags()
405 * Sets the fields in the Tx cmd that are rate related
412 tx_cmd->rts_retry_limit = IWL_RTS_DFAULT_RETRY_LIMIT; in iwl_mvm_set_tx_cmd_rate()
416 tx_cmd->data_retry_limit = IWL_MGMT_DFAULT_RETRY_LIMIT; in iwl_mvm_set_tx_cmd_rate()
417 tx_cmd->rts_retry_limit = in iwl_mvm_set_tx_cmd_rate()
418 min(tx_cmd->data_retry_limit, tx_cmd->rts_retry_limit); in iwl_mvm_set_tx_cmd_rate()
420 tx_cmd->data_retry_limit = IWL_BAR_DFAULT_RETRY_LIMIT; in iwl_mvm_set_tx_cmd_rate()
422 tx_cmd->data_retry_limit = IWL_DEFAULT_TX_RETRY; in iwl_mvm_set_tx_cmd_rate()
431 !(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT))) { in iwl_mvm_set_tx_cmd_rate()
434 if (mvmsta->sta_state >= IEEE80211_STA_AUTHORIZED) { in iwl_mvm_set_tx_cmd_rate()
435 tx_cmd->initial_rate_index = 0; in iwl_mvm_set_tx_cmd_rate()
436 tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE); in iwl_mvm_set_tx_cmd_rate()
440 tx_cmd->tx_flags |= in iwl_mvm_set_tx_cmd_rate()
444 /* Set the rate in the TX cmd */ in iwl_mvm_set_tx_cmd_rate()
445 tx_cmd->rate_n_flags = in iwl_mvm_set_tx_cmd_rate()
452 struct ieee80211_key_conf *keyconf = info->control.hw_key; in iwl_mvm_set_tx_cmd_pn()
455 pn = atomic64_inc_return(&keyconf->tx_pn); in iwl_mvm_set_tx_cmd_pn()
458 crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6); in iwl_mvm_set_tx_cmd_pn()
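/*
 * Editor's sketch of the full CCMP/GCMP PN header this helper fills in
 * (per IEEE 802.11; the remaining byte writes are elided from this
 * listing):
 *
 *	crypto_hdr[0] = pn;                      PN0
 *	crypto_hdr[1] = pn >> 8;                 PN1
 *	crypto_hdr[2] = 0;                       reserved
 *	crypto_hdr[3] = 0x20 | (keyidx << 6);    Ext IV | key ID
 *	crypto_hdr[4] = pn >> 16;                PN2
 *	crypto_hdr[5] = pn >> 24;                PN3
 *	crypto_hdr[6] = pn >> 32;                PN4
 *	crypto_hdr[7] = pn >> 40;                PN5
 */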
467 * Sets the fields in the Tx cmd that are crypto related
475 struct ieee80211_key_conf *keyconf = info->control.hw_key; in iwl_mvm_set_tx_cmd_crypto()
476 u8 *crypto_hdr = skb_frag->data + hdrlen; in iwl_mvm_set_tx_cmd_crypto()
480 switch (keyconf->cipher) { in iwl_mvm_set_tx_cmd_crypto()
487 tx_cmd->sec_ctl = TX_CMD_SEC_TKIP; in iwl_mvm_set_tx_cmd_crypto()
488 pn = atomic64_inc_return(&keyconf->tx_pn); in iwl_mvm_set_tx_cmd_crypto()
490 ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key); in iwl_mvm_set_tx_cmd_crypto()
494 tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128; in iwl_mvm_set_tx_cmd_crypto()
497 tx_cmd->sec_ctl |= TX_CMD_SEC_WEP | in iwl_mvm_set_tx_cmd_crypto()
498 ((keyconf->keyidx << TX_CMD_SEC_WEP_KEY_IDX_POS) & in iwl_mvm_set_tx_cmd_crypto()
501 memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen); in iwl_mvm_set_tx_cmd_crypto()
514 tx_cmd->sec_ctl |= type | TX_CMD_SEC_KEY_FROM_TABLE; in iwl_mvm_set_tx_cmd_crypto()
515 tx_cmd->key[0] = keyconf->hw_key_idx; in iwl_mvm_set_tx_cmd_crypto()
519 tx_cmd->sec_ctl |= TX_CMD_SEC_EXT; in iwl_mvm_set_tx_cmd_crypto()
531 if (unlikely(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)) in iwl_mvm_use_host_rate()
534 if (likely(ieee80211_is_data(hdr->frame_control) && in iwl_mvm_use_host_rate()
535 mvmsta->sta_state >= IEEE80211_STA_AUTHORIZED)) in iwl_mvm_use_host_rate()
545 return mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ; in iwl_mvm_use_host_rate()
555 memcpy(out_hdr->addr3, addr3_override, ETH_ALEN); in iwl_mvm_copy_hdr()
559 * Allocates the Tx cmd and sets the driver data pointers in the skb
567 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; in iwl_mvm_set_tx_params()
571 dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans); in iwl_mvm_set_tx_params()
576 dev_cmd->hdr.cmd = TX_CMD; in iwl_mvm_set_tx_params()
585 if (ieee80211_is_data_qos(hdr->frame_control)) { in iwl_mvm_set_tx_params()
591 if (!info->control.hw_key) in iwl_mvm_set_tx_params()
604 hdr->frame_control); in iwl_mvm_set_tx_params()
605 } else if (!ieee80211_is_data(hdr->frame_control) || in iwl_mvm_set_tx_params()
606 mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) { in iwl_mvm_set_tx_params()
611 if (mvm->trans->trans_cfg->device_family >= in iwl_mvm_set_tx_params()
613 struct iwl_tx_cmd_gen3 *cmd = (void *)dev_cmd->payload; in iwl_mvm_set_tx_params()
617 cmd->offload_assist = cpu_to_le32(offload_assist); in iwl_mvm_set_tx_params()
620 cmd->len = cpu_to_le16((u16)skb->len); in iwl_mvm_set_tx_params()
623 iwl_mvm_copy_hdr(cmd->hdr, hdr, hdrlen, addr3_override); in iwl_mvm_set_tx_params()
625 cmd->flags = cpu_to_le16(flags); in iwl_mvm_set_tx_params()
626 cmd->rate_n_flags = cpu_to_le32(rate_n_flags); in iwl_mvm_set_tx_params()
628 struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload; in iwl_mvm_set_tx_params()
632 cmd->offload_assist = cpu_to_le16(offload_assist); in iwl_mvm_set_tx_params()
635 cmd->len = cpu_to_le16((u16)skb->len); in iwl_mvm_set_tx_params()
638 iwl_mvm_copy_hdr(cmd->hdr, hdr, hdrlen, addr3_override); in iwl_mvm_set_tx_params()
640 cmd->flags = cpu_to_le32(flags); in iwl_mvm_set_tx_params()
641 cmd->rate_n_flags = cpu_to_le32(rate_n_flags); in iwl_mvm_set_tx_params()
646 tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; in iwl_mvm_set_tx_params()
648 if (info->control.hw_key) in iwl_mvm_set_tx_params()
653 iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control); in iwl_mvm_set_tx_params()
656 iwl_mvm_copy_hdr(tx_cmd->hdr, hdr, hdrlen, addr3_override); in iwl_mvm_set_tx_params()
667 memset(&skb_info->status, 0, sizeof(skb_info->status)); in iwl_mvm_skb_prepare_status()
668 memset(skb_info->driver_data, 0, sizeof(skb_info->driver_data)); in iwl_mvm_skb_prepare_status()
670 skb_info->driver_data[1] = cmd; in iwl_mvm_skb_prepare_status()
678 struct ieee80211_hdr *hdr = (void *)skb->data; in iwl_mvm_get_ctrl_vif_queue()
679 __le16 fc = hdr->frame_control; in iwl_mvm_get_ctrl_vif_queue()
681 switch (info->control.vif->type) { in iwl_mvm_get_ctrl_vif_queue()
685 * Non-bufferable frames use the broadcast station, thus they in iwl_mvm_get_ctrl_vif_queue()
686 * use the probe queue. in iwl_mvm_get_ctrl_vif_queue()
689 * response (with non-success status) for a station we can't in iwl_mvm_get_ctrl_vif_queue()
697 return link->mgmt_queue; in iwl_mvm_get_ctrl_vif_queue()
700 is_multicast_ether_addr(hdr->addr1)) in iwl_mvm_get_ctrl_vif_queue()
701 return link->cab_queue; in iwl_mvm_get_ctrl_vif_queue()
703 WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC, in iwl_mvm_get_ctrl_vif_queue()
705 return link->mgmt_queue; in iwl_mvm_get_ctrl_vif_queue()
708 return mvm->p2p_dev_queue; in iwl_mvm_get_ctrl_vif_queue()
711 return mvm->p2p_dev_queue; in iwl_mvm_get_ctrl_vif_queue()
713 WARN_ONCE(1, "Not a ctrl vif, no available queue\n"); in iwl_mvm_get_ctrl_vif_queue()
714 return -1; in iwl_mvm_get_ctrl_vif_queue()
723 iwl_mvm_vif_from_mac80211(info->control.vif); in iwl_mvm_probe_resp_set_noa()
724 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; in iwl_mvm_probe_resp_set_noa()
725 int base_len = (u8 *)mgmt->u.probe_resp.variable - (u8 *)mgmt; in iwl_mvm_probe_resp_set_noa()
738 resp_data = rcu_dereference(mvmvif->deflink.probe_resp_data); in iwl_mvm_probe_resp_set_noa()
742 if (!resp_data->notif.noa_active) in iwl_mvm_probe_resp_set_noa()
746 mgmt->u.probe_resp.variable, in iwl_mvm_probe_resp_set_noa()
747 skb->len - base_len, in iwl_mvm_probe_resp_set_noa()
754 if (skb_tailroom(skb) < resp_data->noa_len) { in iwl_mvm_probe_resp_set_noa()
755 if (pskb_expand_head(skb, 0, resp_data->noa_len, GFP_ATOMIC)) { in iwl_mvm_probe_resp_set_noa()
762 pos = skb_put(skb, resp_data->noa_len); in iwl_mvm_probe_resp_set_noa()
766 *pos++ = resp_data->noa_len - 2; in iwl_mvm_probe_resp_set_noa()
772 memcpy(pos, &resp_data->notif.noa_attr, in iwl_mvm_probe_resp_set_noa()
773 resp_data->noa_len - sizeof(struct ieee80211_vendor_ie)); in iwl_mvm_probe_resp_set_noa()
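/*
 * Editor's gloss: the NoA element is appended as a standard
 * vendor-specific IE, so the length byte written above excludes the
 * 2-byte (EID, length) header itself. Schematically:
 *
 *	pos[0] = WLAN_EID_VENDOR_SPECIFIC;
 *	pos[1] = noa_len - 2;              bytes after the length field
 *	pos[2..5] = WFA OUI + P2P type     (elided from this listing)
 *
 * followed by the NoA attribute payload memcpy'd in above.
 */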
781 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; in iwl_mvm_tx_skb_non_sta()
785 int hdrlen = ieee80211_hdrlen(hdr->frame_control); in iwl_mvm_tx_skb_non_sta()
786 __le16 fc = hdr->frame_control; in iwl_mvm_tx_skb_non_sta()
787 bool offchannel = IEEE80211_SKB_CB(skb)->flags & in iwl_mvm_tx_skb_non_sta()
789 int queue = -1; in iwl_mvm_tx_skb_non_sta() local
792 return -1; in iwl_mvm_tx_skb_non_sta()
794 memcpy(&info, skb->cb, sizeof(info)); in iwl_mvm_tx_skb_non_sta()
796 if (WARN_ON_ONCE(skb->len > IEEE80211_MAX_DATA_LEN + hdrlen)) in iwl_mvm_tx_skb_non_sta()
797 return -1; in iwl_mvm_tx_skb_non_sta()
800 return -1; in iwl_mvm_tx_skb_non_sta()
807 if ((info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE && in iwl_mvm_tx_skb_non_sta()
809 (info.control.vif->type == NL80211_IFTYPE_STATION && in iwl_mvm_tx_skb_non_sta()
815 * P2P Device uses the offchannel queue. in iwl_mvm_tx_skb_non_sta()
817 * and hence needs to be sent on the aux queue. in iwl_mvm_tx_skb_non_sta()
819 * P2P Device also uses the aux queue. in iwl_mvm_tx_skb_non_sta()
821 sta_id = mvm->aux_sta.sta_id; in iwl_mvm_tx_skb_non_sta()
822 queue = mvm->aux_queue; in iwl_mvm_tx_skb_non_sta()
823 if (WARN_ON(queue == IWL_MVM_INVALID_QUEUE)) in iwl_mvm_tx_skb_non_sta()
824 return -1; in iwl_mvm_tx_skb_non_sta()
825 } else if (info.control.vif->type == in iwl_mvm_tx_skb_non_sta()
827 info.control.vif->type == NL80211_IFTYPE_AP || in iwl_mvm_tx_skb_non_sta()
828 info.control.vif->type == NL80211_IFTYPE_ADHOC) { in iwl_mvm_tx_skb_non_sta()
834 if (info.control.vif->active_links) in iwl_mvm_tx_skb_non_sta()
835 link_id = ffs(info.control.vif->active_links) - 1; in iwl_mvm_tx_skb_non_sta()
840 link = mvmvif->link[link_id]; in iwl_mvm_tx_skb_non_sta()
842 return -1; in iwl_mvm_tx_skb_non_sta()
844 if (!ieee80211_is_data(hdr->frame_control)) in iwl_mvm_tx_skb_non_sta()
845 sta_id = link->bcast_sta.sta_id; in iwl_mvm_tx_skb_non_sta()
847 sta_id = link->mcast_sta.sta_id; in iwl_mvm_tx_skb_non_sta()
849 queue = iwl_mvm_get_ctrl_vif_queue(mvm, link, &info, in iwl_mvm_tx_skb_non_sta()
851 } else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) { in iwl_mvm_tx_skb_non_sta()
852 queue = mvm->snif_queue; in iwl_mvm_tx_skb_non_sta()
853 sta_id = mvm->snif_sta.sta_id; in iwl_mvm_tx_skb_non_sta()
857 if (queue < 0) { in iwl_mvm_tx_skb_non_sta()
858 IWL_ERR(mvm, "No queue was found. Dropping TX\n"); in iwl_mvm_tx_skb_non_sta()
859 return -1; in iwl_mvm_tx_skb_non_sta()
865 IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue); in iwl_mvm_tx_skb_non_sta()
870 return -1; in iwl_mvm_tx_skb_non_sta()
872 /* From now on, we cannot access info->control */ in iwl_mvm_tx_skb_non_sta()
875 if (iwl_trans_tx(mvm->trans, skb, dev_cmd, queue)) { in iwl_mvm_tx_skb_non_sta()
876 iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); in iwl_mvm_tx_skb_non_sta()
877 return -1; in iwl_mvm_tx_skb_non_sta()
894 if (sta->deflink.he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm))) in iwl_mvm_max_amsdu_size()
901 * Add a security margin of 256 for the TX command + headers. in iwl_mvm_max_amsdu_size()
905 val = mvmsta->max_amsdu_len; in iwl_mvm_max_amsdu_size()
907 if (hweight16(sta->valid_links) <= 1) { in iwl_mvm_max_amsdu_size()
908 if (sta->valid_links) { in iwl_mvm_max_amsdu_size()
910 unsigned int link = ffs(sta->valid_links) - 1; in iwl_mvm_max_amsdu_size()
913 link_conf = rcu_dereference(mvmsta->vif->link_conf[link]); in iwl_mvm_max_amsdu_size()
917 band = link_conf->chanreq.oper.chan->band; in iwl_mvm_max_amsdu_size()
920 band = mvmsta->vif->bss_conf.chanreq.oper.chan->band; in iwl_mvm_max_amsdu_size()
924 } else if (fw_has_capa(&mvm->fw->ucode_capa, in iwl_mvm_max_amsdu_size()
929 mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256); in iwl_mvm_max_amsdu_size()
936 mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256); in iwl_mvm_max_amsdu_size()
947 struct ieee80211_hdr *hdr = (void *)skb->data; in iwl_mvm_tx_tso_segment()
948 char cb[sizeof(skb->cb)]; in iwl_mvm_tx_tso_segment()
951 unsigned int mss = skb_shinfo(skb)->gso_size; in iwl_mvm_tx_tso_segment()
952 bool ipv4 = (skb->protocol == htons(ETH_P_IP)); in iwl_mvm_tx_tso_segment()
953 bool qos = ieee80211_is_data_qos(hdr->frame_control); in iwl_mvm_tx_tso_segment()
954 u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0; in iwl_mvm_tx_tso_segment()
956 skb_shinfo(skb)->gso_size = num_subframes * mss; in iwl_mvm_tx_tso_segment()
957 memcpy(cb, skb->cb, sizeof(cb)); in iwl_mvm_tx_tso_segment()
960 skb_shinfo(skb)->gso_size = mss; in iwl_mvm_tx_tso_segment()
961 skb_shinfo(skb)->gso_type = ipv4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6; in iwl_mvm_tx_tso_segment()
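/*
 * Editor's gloss on the trick above: gso_size is temporarily inflated
 * to num_subframes * mss so that the (elided) segmentation call cuts
 * the skb into chunks each carrying num_subframes MSS-sized TCP
 * segments, i.e. exactly one future A-MSDU per resulting skb; the
 * original gso_size/gso_type are then restored.
 */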
963 if (IS_ERR(next) && PTR_ERR(next) == -ENOMEM) in iwl_mvm_tx_tso_segment()
964 return -ENOMEM; in iwl_mvm_tx_tso_segment()
974 memcpy(tmp->cb, cb, sizeof(tmp->cb)); in iwl_mvm_tx_tso_segment()
976 * Compute the length of all the data added for the A-MSDU. in iwl_mvm_tx_tso_segment()
977 * This will be used to compute the length to write in the TX in iwl_mvm_tx_tso_segment()
978 * command. We have: SNAP + IP + TCP for n - 1 subframes and in iwl_mvm_tx_tso_segment()
981 tcp_payload_len = skb_tail_pointer(tmp) - in iwl_mvm_tx_tso_segment()
982 skb_transport_header(tmp) - in iwl_mvm_tx_tso_segment()
983 tcp_hdrlen(tmp) + tmp->data_len; in iwl_mvm_tx_tso_segment()
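/*
 * Editor's gloss: this computes the TCP payload bytes of the segment,
 * i.e. the linear data between the end of the TCP header and the tail
 * pointer, plus the paged bytes accounted in data_len:
 *
 *	payload = (tail - transport_header) - tcp_hdrlen + data_len
 */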
986 ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes); in iwl_mvm_tx_tso_segment()
989 skb_shinfo(tmp)->gso_size = mss; in iwl_mvm_tx_tso_segment()
990 skb_shinfo(tmp)->gso_type = ipv4 ? SKB_GSO_TCPV4 : in iwl_mvm_tx_tso_segment()
999 qc = ieee80211_get_qos_ctl((void *)tmp->data); in iwl_mvm_tx_tso_segment()
1002 skb_shinfo(tmp)->gso_size = 0; in iwl_mvm_tx_tso_segment()
1019 struct ieee80211_hdr *hdr = (void *)skb->data; in iwl_mvm_tx_tso()
1020 unsigned int mss = skb_shinfo(skb)->gso_size; in iwl_mvm_tx_tso()
1028 if (!mvmsta->max_amsdu_len || in iwl_mvm_tx_tso()
1029 !ieee80211_is_data_qos(hdr->frame_control) || in iwl_mvm_tx_tso()
1030 !mvmsta->amsdu_enabled) in iwl_mvm_tx_tso()
1037 if (skb->protocol == htons(ETH_P_IPV6) && in iwl_mvm_tx_tso()
1038 ((struct ipv6hdr *)skb_network_header(skb))->nexthdr != in iwl_mvm_tx_tso()
1046 return -EINVAL; in iwl_mvm_tx_tso()
1052 if ((info->flags & IEEE80211_TX_CTL_AMPDU && in iwl_mvm_tx_tso()
1053 !mvmsta->tid_data[tid].amsdu_in_ampdu_allowed) || in iwl_mvm_tx_tso()
1054 !(mvmsta->amsdu_enabled & BIT(tid))) in iwl_mvm_tx_tso()
1061 min_t(unsigned int, sta->cur->max_amsdu_len, in iwl_mvm_tx_tso()
1065 * Limit A-MSDU in A-MPDU to 4095 bytes when VHT is not in iwl_mvm_tx_tso()
1066 * supported. This is a spec requirement (IEEE 802.11-2015 in iwl_mvm_tx_tso()
1069 if (info->flags & IEEE80211_TX_CTL_AMPDU && in iwl_mvm_tx_tso()
1070 !sta->deflink.vht_cap.vht_supported) in iwl_mvm_tx_tso()
1075 pad = (4 - subf_len) & 0x3; in iwl_mvm_tx_tso()
1078 * If we have N subframes in the A-MSDU, then the A-MSDU's size is in iwl_mvm_tx_tso()
1079 * N * subf_len + (N - 1) * pad. in iwl_mvm_tx_tso()
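/*
 * A minimal sketch of the subframe-count derivation (assumed from the
 * formula above, not copied from the elided driver lines): solving
 * max_amsdu_len >= N * subf_len + (N - 1) * pad for N gives
 *
 *	num_subframes = (max_amsdu_len + pad) / (subf_len + pad);
 *
 * e.g. subf_len = 62 gives pad = (4 - 62) & 0x3 = 2, and with
 * max_amsdu_len = 3839 that yields 3841 / 64 = 60 subframes.
 */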
1083 if (sta->max_amsdu_subframes && in iwl_mvm_tx_tso()
1084 num_subframes > sta->max_amsdu_subframes) in iwl_mvm_tx_tso()
1085 num_subframes = sta->max_amsdu_subframes; in iwl_mvm_tx_tso()
1087 tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) - in iwl_mvm_tx_tso()
1088 tcp_hdrlen(skb) + skb->data_len; in iwl_mvm_tx_tso()
1091 * Make sure we have enough TBs for the A-MSDU: in iwl_mvm_tx_tso()
1096 if ((num_subframes * 2 + skb_shinfo(skb)->nr_frags + 1) > in iwl_mvm_tx_tso()
1097 mvm->trans->max_skb_frags) in iwl_mvm_tx_tso()
1103 /* This skb fits in one single A-MSDU */ in iwl_mvm_tx_tso()
1111 * create SKBs that can fit into one A-MSDU. in iwl_mvm_tx_tso()
1125 return -1; in iwl_mvm_tx_tso()
1129 /* Check if there are any timed-out TIDs on a given shared TXQ */
1132 unsigned long queue_tid_bitmap = mvm->queue_info[txq_id].tid_bitmap; in iwl_mvm_txq_should_update()
1140 if (time_before(mvm->queue_info[txq_id].last_frame_time[tid] + in iwl_mvm_txq_should_update()
1152 int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK; in iwl_mvm_tx_airtime()
1158 mdata = &mvm->tcm.data[mac]; in iwl_mvm_tx_airtime()
1160 if (mvm->tcm.paused) in iwl_mvm_tx_airtime()
1163 if (time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD)) in iwl_mvm_tx_airtime()
1164 schedule_delayed_work(&mvm->tcm.work, 0); in iwl_mvm_tx_airtime()
1166 mdata->tx.airtime += airtime; in iwl_mvm_tx_airtime()
1173 int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK; in iwl_mvm_tx_pkt_queued()
1177 return -EINVAL; in iwl_mvm_tx_pkt_queued()
1179 mdata = &mvm->tcm.data[mac]; in iwl_mvm_tx_pkt_queued()
1181 mdata->tx.pkts[ac]++; in iwl_mvm_tx_pkt_queued()
1187 * Builds the Tx cmd for a single MPDU and hands it to the transport.
1196 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; in iwl_mvm_tx_mpdu()
1207 return -1; in iwl_mvm_tx_mpdu()
1210 fc = hdr->frame_control; in iwl_mvm_tx_mpdu()
1214 return -1; in iwl_mvm_tx_mpdu()
1216 if (WARN_ON_ONCE(mvmsta->deflink.sta_id == IWL_MVM_INVALID_STA)) in iwl_mvm_tx_mpdu()
1217 return -1; in iwl_mvm_tx_mpdu()
1219 if (unlikely(ieee80211_is_any_nullfunc(fc)) && sta->deflink.he_cap.has_he) in iwl_mvm_tx_mpdu()
1220 return -1; in iwl_mvm_tx_mpdu()
1226 sta, mvmsta->deflink.sta_id, in iwl_mvm_tx_mpdu()
1232 * we handle that entirely ourselves -- for uAPSD the firmware in iwl_mvm_tx_mpdu()
1233 * will always send a notification, and for PS-Poll responses in iwl_mvm_tx_mpdu()
1236 info->flags &= ~IEEE80211_TX_STATUS_EOSP; in iwl_mvm_tx_mpdu()
1238 spin_lock(&mvmsta->lock); in iwl_mvm_tx_mpdu()
1240 /* nullfunc frames should go to the MGMT queue regardless of QoS, in iwl_mvm_tx_mpdu()
1249 is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU; in iwl_mvm_tx_mpdu()
1251 mvmsta->tid_data[tid].state != IWL_AGG_ON, in iwl_mvm_tx_mpdu()
1253 mvmsta->tid_data[tid].state, tid)) in iwl_mvm_tx_mpdu()
1256 seq_number = mvmsta->tid_data[tid].seq_number; in iwl_mvm_tx_mpdu()
1260 struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; in iwl_mvm_tx_mpdu()
1262 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); in iwl_mvm_tx_mpdu()
1263 hdr->seq_ctrl |= cpu_to_le16(seq_number); in iwl_mvm_tx_mpdu()
1265 tx_cmd->hdr->seq_ctrl = hdr->seq_ctrl; in iwl_mvm_tx_mpdu()
1272 txq_id = mvmsta->tid_data[tid].txq_id; in iwl_mvm_tx_mpdu()
1274 WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM); in iwl_mvm_tx_mpdu()
1277 iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); in iwl_mvm_tx_mpdu()
1278 spin_unlock(&mvmsta->lock); in iwl_mvm_tx_mpdu()
1279 return -1; in iwl_mvm_tx_mpdu()
1284 mvm->queue_info[txq_id].last_frame_time[tid] = jiffies; in iwl_mvm_tx_mpdu()
1287 * If we have timed-out TIDs - schedule the worker that will in iwl_mvm_tx_mpdu()
1291 * the TX flow. This isn't dangerous because scheduling in iwl_mvm_tx_mpdu()
1292 * mvm->add_stream_wk can't ruin the state, and if we DON'T in iwl_mvm_tx_mpdu()
1293 * schedule it due to some race condition then next TX we get in iwl_mvm_tx_mpdu()
1296 if (unlikely(mvm->queue_info[txq_id].status == in iwl_mvm_tx_mpdu()
1299 schedule_work(&mvm->add_stream_wk); in iwl_mvm_tx_mpdu()
1302 IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x len %d\n", in iwl_mvm_tx_mpdu()
1303 mvmsta->deflink.sta_id, tid, txq_id, in iwl_mvm_tx_mpdu()
1304 IEEE80211_SEQ_TO_SN(seq_number), skb->len); in iwl_mvm_tx_mpdu()
1306 /* From now on, we cannot access info->control */ in iwl_mvm_tx_mpdu()
1310 * The IV is introduced by the HW for new tx api, and it is not present in iwl_mvm_tx_mpdu()
1316 info->control.hw_key && in iwl_mvm_tx_mpdu()
1318 info->control.hw_key->iv_len : 0); in iwl_mvm_tx_mpdu()
1320 if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id)) in iwl_mvm_tx_mpdu()
1324 mvmsta->tid_data[tid].seq_number = seq_number + 0x10; in iwl_mvm_tx_mpdu()
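/* Editor's note: seq_ctrl keeps the fragment number in its low 4 bits
 * (IEEE80211_SCTL_FRAG), so the sequence number proper advances in
 * steps of 0x10, as in the increment above.
 */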
1326 spin_unlock(&mvmsta->lock); in iwl_mvm_tx_mpdu()
1335 iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); in iwl_mvm_tx_mpdu()
1336 spin_unlock(&mvmsta->lock); in iwl_mvm_tx_mpdu()
1338 IWL_DEBUG_TX(mvm, "TX to [%d|%d] dropped\n", mvmsta->deflink.sta_id, in iwl_mvm_tx_mpdu()
1340 return -1; in iwl_mvm_tx_mpdu()
1356 return -1; in iwl_mvm_tx_skb_sta()
1360 if (WARN_ON_ONCE(mvmsta->deflink.sta_id == IWL_MVM_INVALID_STA)) in iwl_mvm_tx_skb_sta()
1361 return -1; in iwl_mvm_tx_skb_sta()
1363 memcpy(&info, skb->cb, sizeof(info)); in iwl_mvm_tx_skb_sta()
1368 payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) - in iwl_mvm_tx_skb_sta()
1369 tcp_hdrlen(skb) + skb->data_len; in iwl_mvm_tx_skb_sta()
1371 if (payload_len <= skb_shinfo(skb)->gso_size) in iwl_mvm_tx_skb_sta()
1378 return -1; in iwl_mvm_tx_skb_sta()
1387 * As described in IEEE Std 802.11-2020, table 9-30 (Address in iwl_mvm_tx_skb_sta()
1388 * field contents), A-MSDU address 3 should contain the BSSID in iwl_mvm_tx_skb_sta()
1393 * A-MSDU subframe headers from it. in iwl_mvm_tx_skb_sta()
1395 switch (vif->type) { in iwl_mvm_tx_skb_sta()
1397 addr3 = vif->cfg.ap_addr; in iwl_mvm_tx_skb_sta()
1400 addr3 = vif->addr; in iwl_mvm_tx_skb_sta()
1412 hdr = (void *)skb->data; in iwl_mvm_tx_skb_sta()
1413 amsdu = ieee80211_is_data_qos(hdr->frame_control) && in iwl_mvm_tx_skb_sta()
1426 ieee80211_free_txskb(mvm->hw, skb); in iwl_mvm_tx_skb_sta()
1441 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; in iwl_mvm_check_ratid_empty()
1442 struct ieee80211_vif *vif = mvmsta->vif; in iwl_mvm_check_ratid_empty()
1445 lockdep_assert_held(&mvmsta->lock); in iwl_mvm_check_ratid_empty()
1447 if ((tid_data->state == IWL_AGG_ON || in iwl_mvm_check_ratid_empty()
1448 tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) && in iwl_mvm_check_ratid_empty()
1451 * Now that this aggregation or DQA queue is empty, tell in iwl_mvm_check_ratid_empty()
1462 normalized_ssn = tid_data->ssn; in iwl_mvm_check_ratid_empty()
1463 if (mvm->trans->trans_cfg->gen2) in iwl_mvm_check_ratid_empty()
1466 if (normalized_ssn != tid_data->next_reclaimed) in iwl_mvm_check_ratid_empty()
1469 switch (tid_data->state) { in iwl_mvm_check_ratid_empty()
1473 tid_data->next_reclaimed); in iwl_mvm_check_ratid_empty()
1474 tid_data->state = IWL_AGG_STARTING; in iwl_mvm_check_ratid_empty()
1475 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); in iwl_mvm_check_ratid_empty()
1481 tid_data->next_reclaimed); in iwl_mvm_check_ratid_empty()
1482 tid_data->state = IWL_AGG_OFF; in iwl_mvm_check_ratid_empty()
1483 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); in iwl_mvm_check_ratid_empty()
1556 r->flags |= in iwl_mvm_hwrate_to_tx_rate()
1561 r->flags |= IEEE80211_TX_RC_SHORT_GI; in iwl_mvm_hwrate_to_tx_rate()
1563 r->flags |= IEEE80211_TX_RC_MCS; in iwl_mvm_hwrate_to_tx_rate()
1564 r->idx = rate; in iwl_mvm_hwrate_to_tx_rate()
1569 r->flags |= IEEE80211_TX_RC_VHT_MCS; in iwl_mvm_hwrate_to_tx_rate()
1573 r->idx = 0; in iwl_mvm_hwrate_to_tx_rate()
1575 r->idx = iwl_mvm_legacy_hw_idx_to_mac80211_idx(rate_n_flags, in iwl_mvm_hwrate_to_tx_rate()
1585 r->flags |= IEEE80211_TX_RC_GREEN_FIELD; in iwl_mvm_hwrate_to_tx_rate_v1()
1587 r->flags |= in iwl_mvm_hwrate_to_tx_rate_v1()
1592 r->flags |= IEEE80211_TX_RC_SHORT_GI; in iwl_mvm_hwrate_to_tx_rate_v1()
1594 r->flags |= IEEE80211_TX_RC_MCS; in iwl_mvm_hwrate_to_tx_rate_v1()
1595 r->idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK_V1; in iwl_mvm_hwrate_to_tx_rate_v1()
1600 r->flags |= IEEE80211_TX_RC_VHT_MCS; in iwl_mvm_hwrate_to_tx_rate_v1()
1602 r->idx = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, in iwl_mvm_hwrate_to_tx_rate_v1()
1608 * translate ucode response to mac80211 tx status control values
1614 struct ieee80211_tx_rate *r = &info->status.rates[0]; in iwl_mvm_hwrate_to_tx_status()
1620 info->status.antenna = in iwl_mvm_hwrate_to_tx_status()
1623 info->band, r); in iwl_mvm_hwrate_to_tx_status()
1640 iwl_dbg_tlv_time_point(&mvm->fwrt, in iwl_mvm_tx_status_check_trigger()
1645 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, in iwl_mvm_tx_status_check_trigger()
1650 status_trig = (void *)trig->data; in iwl_mvm_tx_status_check_trigger()
1652 for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) { in iwl_mvm_tx_status_check_trigger()
1654 if (!status_trig->statuses[i].status) in iwl_mvm_tx_status_check_trigger()
1657 if (status_trig->statuses[i].status != (status & TX_STATUS_MSK)) in iwl_mvm_tx_status_check_trigger()
1660 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, in iwl_mvm_tx_status_check_trigger()
1661 "Tx status %d was received", in iwl_mvm_tx_status_check_trigger()
1668 * iwl_mvm_get_scd_ssn - returns the SSN of the SCD
1669 * @tx_resp: the Tx response from the fw (agg or non-agg)
1674 * batch. This is why it writes the agg Tx response while it fetches the MPDUs.
1680 * For 22000-series and lower, this is just 12 bits. For later, 16 bits.
1686 tx_resp->frame_count); in iwl_mvm_get_scd_ssn()
1688 if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) in iwl_mvm_get_scd_ssn()
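/*
 * Illustrative only (the masking lines are elided from this listing):
 * the value read past the status array is truncated per family,
 * roughly
 *
 *	return val & 0xffff;    AX210 and later: 16-bit SSN
 *	return val & 0x0fff;    earlier families: 12-bit SSN
 */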
1694 struct iwl_rx_packet *pkt) in iwl_mvm_rx_tx_cmd_single() argument
1697 u16 sequence = le16_to_cpu(pkt->hdr.sequence); in iwl_mvm_rx_tx_cmd_single()
1700 struct iwl_tx_resp *tx_resp = (void *)pkt->data; in iwl_mvm_rx_tx_cmd_single()
1701 int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid); in iwl_mvm_rx_tx_cmd_single()
1702 int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid); in iwl_mvm_rx_tx_cmd_single()
1705 u32 status = le16_to_cpu(agg_status->status); in iwl_mvm_rx_tx_cmd_single()
1716 txq_id = le16_to_cpu(tx_resp->tx_queue); in iwl_mvm_rx_tx_cmd_single()
1718 seq_ctl = le16_to_cpu(tx_resp->seq_ctl); in iwl_mvm_rx_tx_cmd_single()
1721 iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs, false); in iwl_mvm_rx_tx_cmd_single()
1726 struct ieee80211_hdr *hdr = (void *)skb->data; in iwl_mvm_rx_tx_cmd_single()
1731 iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); in iwl_mvm_rx_tx_cmd_single()
1733 memset(&info->status, 0, sizeof(info->status)); in iwl_mvm_rx_tx_cmd_single()
1734 info->flags &= ~(IEEE80211_TX_STAT_ACK | IEEE80211_TX_STAT_TX_FILTERED); in iwl_mvm_rx_tx_cmd_single()
1740 info->flags |= IEEE80211_TX_STAT_ACK; in iwl_mvm_rx_tx_cmd_single()
1747 /* the FW should have stopped the queue and not in iwl_mvm_rx_tx_cmd_single()
1751 "FW reported TX filtered, status=0x%x, FC=0x%x\n", in iwl_mvm_rx_tx_cmd_single()
1752 status, le16_to_cpu(hdr->frame_control)); in iwl_mvm_rx_tx_cmd_single()
1753 info->flags |= IEEE80211_TX_STAT_TX_FILTERED; in iwl_mvm_rx_tx_cmd_single()
1760 ieee80211_is_mgmt(hdr->frame_control)) in iwl_mvm_rx_tx_cmd_single()
1761 iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx); in iwl_mvm_rx_tx_cmd_single()
1769 info->flags |= IEEE80211_TX_STAT_ACK; in iwl_mvm_rx_tx_cmd_single()
1771 iwl_mvm_tx_status_check_trigger(mvm, status, hdr->frame_control); in iwl_mvm_rx_tx_cmd_single()
1773 info->status.rates[0].count = tx_resp->failure_frame + 1; in iwl_mvm_rx_tx_cmd_single()
1775 iwl_mvm_hwrate_to_tx_status(mvm->fw, in iwl_mvm_rx_tx_cmd_single()
1776 le32_to_cpu(tx_resp->initial_rate), in iwl_mvm_rx_tx_cmd_single()
1782 info->status.status_driver_data[1] = in iwl_mvm_rx_tx_cmd_single()
1783 (void *)(uintptr_t)le32_to_cpu(tx_resp->initial_rate); in iwl_mvm_rx_tx_cmd_single()
1785 /* Single frame failure in an AMPDU queue => send BAR */ in iwl_mvm_rx_tx_cmd_single()
1786 if (info->flags & IEEE80211_TX_CTL_AMPDU && in iwl_mvm_rx_tx_cmd_single()
1787 !(info->flags & IEEE80211_TX_STAT_ACK) && in iwl_mvm_rx_tx_cmd_single()
1788 !(info->flags & IEEE80211_TX_STAT_TX_FILTERED) && !flushed) in iwl_mvm_rx_tx_cmd_single()
1789 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; in iwl_mvm_rx_tx_cmd_single()
1790 info->flags &= ~IEEE80211_TX_CTL_AMPDU; in iwl_mvm_rx_tx_cmd_single()
1793 if (ieee80211_is_back_req(hdr->frame_control)) in iwl_mvm_rx_tx_cmd_single()
1796 seq_ctl = le16_to_cpu(hdr->seq_ctrl); in iwl_mvm_rx_tx_cmd_single()
1802 * reason, NDPs are never sent to A-MPDU'able queues in iwl_mvm_rx_tx_cmd_single()
1804 * for a single Tx response (see WARN_ON below). in iwl_mvm_rx_tx_cmd_single()
1806 if (ieee80211_is_qos_nullfunc(hdr->frame_control)) in iwl_mvm_rx_tx_cmd_single()
1814 info->status.tx_time = in iwl_mvm_rx_tx_cmd_single()
1815 le16_to_cpu(tx_resp->wireless_media_time); in iwl_mvm_rx_tx_cmd_single()
1816 BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1); in iwl_mvm_rx_tx_cmd_single()
1817 lq_color = TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info); in iwl_mvm_rx_tx_cmd_single()
1818 info->status.status_driver_data[0] = in iwl_mvm_rx_tx_cmd_single()
1819 RS_DRV_DATA_PACK(lq_color, tx_resp->reduced_tpc); in iwl_mvm_rx_tx_cmd_single()
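/*
 * Editor's gloss: RS_DRV_DATA_PACK() stores two small values in one
 * pointer-sized status_driver_data slot, conceptually
 *
 *	data = (lq_color << LQ_COLOR_POS) | reduced_tpc;
 *
 * so the rate-scaling code can later match a Tx status against the LQ
 * table ("color") it was sent with; the exact field positions are an
 * assumption here, not copied from the driver headers.
 */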
1821 if (likely(!iwl_mvm_time_sync_frame(mvm, skb, hdr->addr1))) in iwl_mvm_rx_tx_cmd_single()
1822 ieee80211_tx_status_skb(mvm->hw, skb); in iwl_mvm_rx_tx_cmd_single()
1825 /* This is an aggregation queue or might become one, so we use in iwl_mvm_rx_tx_cmd_single()
1828 * this Tx response relates. But if there is a hole in the in iwl_mvm_rx_tx_cmd_single()
1829 * bitmap of the BA we received, this Tx response may allow to in iwl_mvm_rx_tx_cmd_single()
1847 le32_to_cpu(tx_resp->initial_rate), in iwl_mvm_rx_tx_cmd_single()
1848 tx_resp->failure_frame, SEQ_TO_INDEX(sequence), in iwl_mvm_rx_tx_cmd_single()
1853 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); in iwl_mvm_rx_tx_cmd_single()
1856 * the firmware while we still have packets for it in the Tx queues. in iwl_mvm_rx_tx_cmd_single()
1865 le16_to_cpu(tx_resp->wireless_media_time)); in iwl_mvm_rx_tx_cmd_single()
1868 mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) in iwl_mvm_rx_tx_cmd_single()
1869 iwl_mvm_toggle_tx_ant(mvm, &mvmsta->tx_ant); in iwl_mvm_rx_tx_cmd_single()
1871 if (sta->wme && tid != IWL_MGMT_TID) { in iwl_mvm_rx_tx_cmd_single()
1873 &mvmsta->tid_data[tid]; in iwl_mvm_rx_tx_cmd_single()
1876 spin_lock_bh(&mvmsta->lock); in iwl_mvm_rx_tx_cmd_single()
1879 tid_data->next_reclaimed = next_reclaimed; in iwl_mvm_rx_tx_cmd_single()
1886 "NDP - don't update next_reclaimed\n"); in iwl_mvm_rx_tx_cmd_single()
1891 if (mvmsta->sleep_tx_count) { in iwl_mvm_rx_tx_cmd_single()
1892 mvmsta->sleep_tx_count--; in iwl_mvm_rx_tx_cmd_single()
1893 if (mvmsta->sleep_tx_count && in iwl_mvm_rx_tx_cmd_single()
1896 * The number of frames in the queue in iwl_mvm_rx_tx_cmd_single()
1899 * Tx queue. in iwl_mvm_rx_tx_cmd_single()
1912 spin_unlock_bh(&mvmsta->lock); in iwl_mvm_rx_tx_cmd_single()
1917 mvmsta->sleep_tx_count = 0; in iwl_mvm_rx_tx_cmd_single()
1922 if (mvmsta->next_status_eosp) { in iwl_mvm_rx_tx_cmd_single()
1923 mvmsta->next_status_eosp = false; in iwl_mvm_rx_tx_cmd_single()
1955 struct iwl_rx_packet *pkt) in iwl_mvm_rx_tx_cmd_agg_dbg() argument
1957 struct iwl_tx_resp *tx_resp = (void *)pkt->data; in iwl_mvm_rx_tx_cmd_agg_dbg()
1963 for (i = 0; i < tx_resp->frame_count; i++) { in iwl_mvm_rx_tx_cmd_agg_dbg()
1969 "status %s (0x%04x), try-count (%d) seq (0x%x)\n", in iwl_mvm_rx_tx_cmd_agg_dbg()
1978 iwl_dbg_tlv_time_point(&mvm->fwrt, in iwl_mvm_rx_tx_cmd_agg_dbg()
1984 struct iwl_rx_packet *pkt) in iwl_mvm_rx_tx_cmd_agg_dbg() argument
1989 struct iwl_rx_packet *pkt) in iwl_mvm_rx_tx_cmd_agg() argument
1991 struct iwl_tx_resp *tx_resp = (void *)pkt->data; in iwl_mvm_rx_tx_cmd_agg()
1992 int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid); in iwl_mvm_rx_tx_cmd_agg()
1993 int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid); in iwl_mvm_rx_tx_cmd_agg()
1994 u16 sequence = le16_to_cpu(pkt->hdr.sequence); in iwl_mvm_rx_tx_cmd_agg()
1996 int queue = SEQ_TO_QUEUE(sequence); in iwl_mvm_rx_tx_cmd_agg() local
1999 if (WARN_ON_ONCE(queue < IWL_MVM_DQA_MIN_DATA_QUEUE && in iwl_mvm_rx_tx_cmd_agg()
2000 (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE))) in iwl_mvm_rx_tx_cmd_agg()
2003 iwl_mvm_rx_tx_cmd_agg_dbg(mvm, pkt); in iwl_mvm_rx_tx_cmd_agg()
2009 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); in iwl_mvm_rx_tx_cmd_agg()
2010 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta) || !sta->wme)) { in iwl_mvm_rx_tx_cmd_agg()
2016 mvmsta->tid_data[tid].rate_n_flags = in iwl_mvm_rx_tx_cmd_agg()
2017 le32_to_cpu(tx_resp->initial_rate); in iwl_mvm_rx_tx_cmd_agg()
2018 mvmsta->tid_data[tid].tx_time = in iwl_mvm_rx_tx_cmd_agg()
2019 le16_to_cpu(tx_resp->wireless_media_time); in iwl_mvm_rx_tx_cmd_agg()
2020 mvmsta->tid_data[tid].lq_color = in iwl_mvm_rx_tx_cmd_agg()
2021 TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info); in iwl_mvm_rx_tx_cmd_agg()
2023 le16_to_cpu(tx_resp->wireless_media_time)); in iwl_mvm_rx_tx_cmd_agg()
2031 struct iwl_rx_packet *pkt = rxb_addr(rxb); in iwl_mvm_rx_tx_cmd() local
2032 struct iwl_tx_resp *tx_resp = (void *)pkt->data; in iwl_mvm_rx_tx_cmd()
2034 if (tx_resp->frame_count == 1) in iwl_mvm_rx_tx_cmd()
2035 iwl_mvm_rx_tx_cmd_single(mvm, pkt); in iwl_mvm_rx_tx_cmd()
2037 iwl_mvm_rx_tx_cmd_agg(mvm, pkt); in iwl_mvm_rx_tx_cmd()
2052 if (WARN_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations || in iwl_mvm_tx_reclaim()
2059 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); in iwl_mvm_tx_reclaim()
2071 * block-ack window (we assume that they've been successfully in iwl_mvm_tx_reclaim()
2074 iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs, is_flush); in iwl_mvm_tx_reclaim()
2079 iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); in iwl_mvm_tx_reclaim()
2081 memset(&info->status, 0, sizeof(info->status)); in iwl_mvm_tx_reclaim()
2087 info->flags |= IEEE80211_TX_STAT_ACK; in iwl_mvm_tx_reclaim()
2089 info->flags &= ~IEEE80211_TX_STAT_ACK; in iwl_mvm_tx_reclaim()
2094 * invalidated in order to prevent new Tx from being sent, but there may in iwl_mvm_tx_reclaim()
2095 * be some frames already in-flight). in iwl_mvm_tx_reclaim()
2097 * sta-dependent stuff since it's in the middle of being removed in iwl_mvm_tx_reclaim()
2104 tid_data = &mvmsta->tid_data[tid]; in iwl_mvm_tx_reclaim()
2106 if (tid_data->txq_id != txq) { in iwl_mvm_tx_reclaim()
2109 tid_data->txq_id, tid); in iwl_mvm_tx_reclaim()
2114 spin_lock_bh(&mvmsta->lock); in iwl_mvm_tx_reclaim()
2116 tid_data->next_reclaimed = index; in iwl_mvm_tx_reclaim()
2123 tx_info->status.status_driver_data[0] = in iwl_mvm_tx_reclaim()
2124 RS_DRV_DATA_PACK(tid_data->lq_color, in iwl_mvm_tx_reclaim()
2125 tx_info->status.status_driver_data[0]); in iwl_mvm_tx_reclaim()
2126 tx_info->status.status_driver_data[1] = (void *)(uintptr_t)rate; in iwl_mvm_tx_reclaim()
2129 struct ieee80211_hdr *hdr = (void *)skb->data; in iwl_mvm_tx_reclaim()
2133 if (ieee80211_is_data_qos(hdr->frame_control)) in iwl_mvm_tx_reclaim()
2142 info->flags |= IEEE80211_TX_STAT_AMPDU; in iwl_mvm_tx_reclaim()
2143 memcpy(&info->status, &tx_info->status, in iwl_mvm_tx_reclaim()
2144 sizeof(tx_info->status)); in iwl_mvm_tx_reclaim()
2145 iwl_mvm_hwrate_to_tx_status(mvm->fw, rate, info); in iwl_mvm_tx_reclaim()
2149 spin_unlock_bh(&mvmsta->lock); in iwl_mvm_tx_reclaim()
2159 /* no TLC offload, so non-MLD mode */ in iwl_mvm_tx_reclaim()
2160 if (mvmsta->vif) in iwl_mvm_tx_reclaim()
2162 rcu_dereference(mvmsta->vif->bss_conf.chanctx_conf); in iwl_mvm_tx_reclaim()
2167 tx_info->band = chanctx_conf->def.chan->band; in iwl_mvm_tx_reclaim()
2168 iwl_mvm_hwrate_to_tx_status(mvm->fw, rate, tx_info); in iwl_mvm_tx_reclaim()
2179 ieee80211_tx_status_skb(mvm->hw, skb); in iwl_mvm_tx_reclaim()
2185 struct iwl_rx_packet *pkt = rxb_addr(rxb); in iwl_mvm_rx_ba_notif() local
2186 unsigned int pkt_len = iwl_rx_packet_payload_len(pkt); in iwl_mvm_rx_ba_notif()
2197 (void *)pkt->data; in iwl_mvm_rx_ba_notif()
2198 u8 lq_color = TX_RES_RATE_TABLE_COL_GET(ba_res->tlc_rate_info); in iwl_mvm_rx_ba_notif()
2206 sta_id = ba_res->sta_id; in iwl_mvm_rx_ba_notif()
2207 ba_info.status.ampdu_ack_len = (u8)le16_to_cpu(ba_res->done); in iwl_mvm_rx_ba_notif()
2208 ba_info.status.ampdu_len = (u8)le16_to_cpu(ba_res->txed); in iwl_mvm_rx_ba_notif()
2210 (u16)le32_to_cpu(ba_res->wireless_time); in iwl_mvm_rx_ba_notif()
2212 (void *)(uintptr_t)ba_res->reduced_txp; in iwl_mvm_rx_ba_notif()
2214 tfd_cnt = le16_to_cpu(ba_res->tfd_cnt); in iwl_mvm_rx_ba_notif()
2226 sta_id, le32_to_cpu(ba_res->flags), in iwl_mvm_rx_ba_notif()
2227 le16_to_cpu(ba_res->txed), in iwl_mvm_rx_ba_notif()
2228 le16_to_cpu(ba_res->done)); in iwl_mvm_rx_ba_notif()
2235 * (rcu is invalidated in order to prevent new Tx from being in iwl_mvm_rx_ba_notif()
2236 * sent, but there may be some frames already in-flight). in iwl_mvm_rx_ba_notif()
2238 * sta-dependent stuff since it's in the middle of being removed in iwl_mvm_rx_ba_notif()
2245 &ba_res->tfd[i]; in iwl_mvm_rx_ba_notif()
2247 tid = ba_tfd->tid; in iwl_mvm_rx_ba_notif()
2252 mvmsta->tid_data[i].lq_color = lq_color; in iwl_mvm_rx_ba_notif()
2255 (int)(le16_to_cpu(ba_tfd->q_num)), in iwl_mvm_rx_ba_notif()
2256 le16_to_cpu(ba_tfd->tfd_index), in iwl_mvm_rx_ba_notif()
2258 le32_to_cpu(ba_res->tx_rate), false); in iwl_mvm_rx_ba_notif()
2263 le32_to_cpu(ba_res->wireless_time)); in iwl_mvm_rx_ba_notif()
2266 le16_to_cpu(ba_res->txed), true, 0); in iwl_mvm_rx_ba_notif()
2272 ba_notif = (void *)pkt->data; in iwl_mvm_rx_ba_notif()
2273 sta_id = ba_notif->sta_id; in iwl_mvm_rx_ba_notif()
2274 tid = ba_notif->tid; in iwl_mvm_rx_ba_notif()
2275 /* "flow" corresponds to Tx queue */ in iwl_mvm_rx_ba_notif()
2276 txq = le16_to_cpu(ba_notif->scd_flow); in iwl_mvm_rx_ba_notif()
2277 /* "ssn" is start of block-ack Tx window, corresponds to index in iwl_mvm_rx_ba_notif()
2278 * (in Tx queue's circular buffer) of first TFD/frame in window */ in iwl_mvm_rx_ba_notif()
2279 index = le16_to_cpu(ba_notif->scd_ssn); in iwl_mvm_rx_ba_notif()
2290 tid_data = &mvmsta->tid_data[tid]; in iwl_mvm_rx_ba_notif()
2292 ba_info.status.ampdu_ack_len = ba_notif->txed_2_done; in iwl_mvm_rx_ba_notif()
2293 ba_info.status.ampdu_len = ba_notif->txed; in iwl_mvm_rx_ba_notif()
2294 ba_info.status.tx_time = tid_data->tx_time; in iwl_mvm_rx_ba_notif()
2296 (void *)(uintptr_t)ba_notif->reduced_txp; in iwl_mvm_rx_ba_notif()
2302 ba_notif->sta_addr, ba_notif->sta_id); in iwl_mvm_rx_ba_notif()
2306 ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl), in iwl_mvm_rx_ba_notif()
2307 le64_to_cpu(ba_notif->bitmap), txq, index, in iwl_mvm_rx_ba_notif()
2308 ba_notif->txed, ba_notif->txed_2_done); in iwl_mvm_rx_ba_notif()
2311 ba_notif->reduced_txp); in iwl_mvm_rx_ba_notif()
2314 tid_data->rate_n_flags, false); in iwl_mvm_rx_ba_notif()
2320 * queue might not be empty. The race-free way to handle this is to:
2322 * 2) flush the Tx path
2358 if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TXPATH_FLUSH, 0) > 0) in iwl_mvm_flush_sta_tids()
2376 ret = -EIO; in iwl_mvm_flush_sta_tids()
2380 rsp = (void *)cmd.resp_pkt->data; in iwl_mvm_flush_sta_tids()
2382 if (WARN_ONCE(le16_to_cpu(rsp->sta_id) != sta_id, in iwl_mvm_flush_sta_tids()
2384 sta_id, le16_to_cpu(rsp->sta_id))) { in iwl_mvm_flush_sta_tids()
2385 ret = -EIO; in iwl_mvm_flush_sta_tids()
2389 num_flushed_queues = le16_to_cpu(rsp->num_flushed_queues); in iwl_mvm_flush_sta_tids()
2392 ret = -EIO; in iwl_mvm_flush_sta_tids()
2398 struct iwl_flush_queue_info *queue_info = &rsp->queues[i]; in iwl_mvm_flush_sta_tids()
2399 int tid = le16_to_cpu(queue_info->tid); in iwl_mvm_flush_sta_tids()
2400 int read_before = le16_to_cpu(queue_info->read_before_flush); in iwl_mvm_flush_sta_tids()
2401 int read_after = le16_to_cpu(queue_info->read_after_flush); in iwl_mvm_flush_sta_tids()
2402 int queue_num = le16_to_cpu(queue_info->queue_num); in iwl_mvm_flush_sta_tids()
2408 "tid %d queue_id %d read-before %d read-after %d\n", in iwl_mvm_flush_sta_tids()