More fixes:

  - ath12k
    - avoid busy-waiting
    - activate correct number of links
  - iwlwifi
    - iwldvm regression (lots of warnings)
    - iwlmld merge damage regression (crash)
    - fix build with some old gcc versions
  - carl9170: don't talk to device w/o FW [syzbot]
  - ath6kl: remove bad FW WARN [syzbot]
  - ieee80211: use variable-length arrays [syzbot]
  - mac80211
    - remove WARN on delayed beacon update [syzbot]
    - drop OCB frames with invalid source [syzbot]
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEpeA8sTs3M8SN2hR410qiO8sPaAAFAmhSg10ACgkQ10qiO8sP
 aAD10w//fdel2PXhAT6weaNRXmpUStR/TWCcKeUy6pko1BanxVbJNK9DPsCmg9Ym
 ApDewdsmTF600VAW+/JpoHCckzIGhx8Q/i5DJ0Xi/sbLweFMeDcmAsHkqVu11wTn
 gatTXlwP0/iXCL/EEtuS8fS1vs7HF29Q4QVNHPtOEaoNSm7LpE20Ukve4fBB8eXr
 oG3l7bNxW4STIFJL1+EWIXXv+tuyfORsIoD65N+NephJvTYvdKocWjjjDgnsLrPR
 HOW7Yli9G7xebXvDhdUVzdDjG0KEcarEHbyIosiPCv/xSnFZlrq4pg2zz8psHG4H
 MxeHlDgKR2gSdAeX207vB2yZ884yrRtA2XuQ7t8JrVATvXI6LE9puB+P0ssqLsvz
 jO9LjU9cfg8EoaBnK1w6vjL+OH3z2D1D8CvrkRjAUxZUKcBfk/NIQu4zsei5GuL5
 fGDSowLiMUMMyecvKbLggxMEwp3qTs9zTb4w5Vn+QtbVFjj8+HgEb4AHrQM9tr/y
 GavBZvCnKqEnv/VgkTh2Z9KfVznFfXKi3N0XR6gotVDueMeG6fkXvwFTSg6dW0UX
 c1vHJTZ0H0AHelWoSz8/1FO6NXmqU76hnW7EwcYkwkGJ3/tji1Bzyt6ihknIttKl
 AVYno+nmAC1WJNzKxJDFE+HX/F1RwowKlek/nLUMU1Ti4kzgRWo=
 =Q6JR
 -----END PGP SIGNATURE-----

Merge tag 'wireless-2025-06-18' of https://git.kernel.org/pub/scm/linux/kernel/git/wireless/wireless

Johannes Berg says:

====================
More fixes:
 - ath12k
   - avoid busy-waiting
   - activate correct number of links
 - iwlwifi
   - iwldvm regression (lots of warnings)
   - iwlmld merge damage regression (crash)
   - fix build with some old gcc versions
 - carl9170: don't talk to device w/o FW [syzbot]
 - ath6kl: remove bad FW WARN [syzbot]
 - ieee80211: use variable-length arrays [syzbot]
 - mac80211
   - remove WARN on delayed beacon update [syzbot]
   - drop OCB frames with invalid source [syzbot]

* tag 'wireless-2025-06-18' of https://git.kernel.org/pub/scm/linux/kernel/git/wireless/wireless:
  wifi: iwlwifi: Fix incorrect logic on cmd_ver range checking
  wifi: iwlwifi: dvm: restore n_no_reclaim_cmds setting
  wifi: iwlwifi: cfg: Limit cb_size to valid range
  wifi: iwlwifi: restore missing initialization of async_handlers_list (again)
  wifi: ath6kl: remove WARN on bad firmware input
  wifi: carl9170: do not ping device which has failed to load firmware
  wifi: ath12k: don't wait when there is no vdev started
  wifi: ath12k: don't use static variables in ath12k_wmi_fw_stats_process()
  wifi: ath12k: avoid burning CPU while waiting for firmware stats
  wifi: ath12k: fix documentation on firmware stats
  wifi: ath12k: don't activate more links than firmware supports
  wifi: ath12k: update link active in case two links fall on the same MAC
  wifi: ath12k: support WMI_MLO_LINK_SET_ACTIVE_CMDID command
  wifi: ath12k: update freq range for each hardware mode
  wifi: ath12k: parse and save sbs_lower_band_end_freq from WMI_SERVICE_READY_EXT2_EVENTID event
  wifi: ath12k: parse and save hardware mode info from WMI_SERVICE_READY_EXT_EVENTID event for later use
  wifi: ath12k: Avoid CPU busy-wait by handling VDEV_STAT and BCN_STAT
  wifi: mac80211: don't WARN for late channel/color switch
  wifi: mac80211: drop invalid source address OCB frames
  wifi: remove zero-length arrays
====================

Link: https://patch.msgid.link/20250618210642.35805-6-johannes@sipsolutions.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
pull/1270/head
Jakub Kicinski 2025-06-19 08:38:40 -07:00
commit dccf87ea49
18 changed files with 1445 additions and 133 deletions

View File

@ -1216,6 +1216,7 @@ void ath12k_fw_stats_init(struct ath12k *ar)
INIT_LIST_HEAD(&ar->fw_stats.pdevs);
INIT_LIST_HEAD(&ar->fw_stats.bcn);
init_completion(&ar->fw_stats_complete);
init_completion(&ar->fw_stats_done);
}
void ath12k_fw_stats_free(struct ath12k_fw_stats *stats)
@ -1228,8 +1229,9 @@ void ath12k_fw_stats_free(struct ath12k_fw_stats *stats)
void ath12k_fw_stats_reset(struct ath12k *ar)
{
spin_lock_bh(&ar->data_lock);
ar->fw_stats.fw_stats_done = false;
ath12k_fw_stats_free(&ar->fw_stats);
ar->fw_stats.num_vdev_recvd = 0;
ar->fw_stats.num_bcn_recvd = 0;
spin_unlock_bh(&ar->data_lock);
}

View File

@ -601,6 +601,12 @@ struct ath12k_sta {
#define ATH12K_NUM_CHANS 101
#define ATH12K_MAX_5GHZ_CHAN 173
static inline bool ath12k_is_2ghz_channel_freq(u32 freq)
{
return freq >= ATH12K_MIN_2GHZ_FREQ &&
freq <= ATH12K_MAX_2GHZ_FREQ;
}
enum ath12k_hw_state {
ATH12K_HW_STATE_OFF,
ATH12K_HW_STATE_ON,
@ -626,7 +632,8 @@ struct ath12k_fw_stats {
struct list_head pdevs;
struct list_head vdevs;
struct list_head bcn;
bool fw_stats_done;
u32 num_vdev_recvd;
u32 num_bcn_recvd;
};
struct ath12k_dbg_htt_stats {
@ -806,6 +813,7 @@ struct ath12k {
bool regdom_set_by_user;
struct completion fw_stats_complete;
struct completion fw_stats_done;
struct completion mlo_setup_done;
u32 mlo_setup_status;

View File

@ -1251,64 +1251,6 @@ void ath12k_debugfs_soc_destroy(struct ath12k_base *ab)
*/
}
void
ath12k_debugfs_fw_stats_process(struct ath12k *ar,
struct ath12k_fw_stats *stats)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_pdev *pdev;
bool is_end;
static unsigned int num_vdev, num_bcn;
size_t total_vdevs_started = 0;
int i;
if (stats->stats_id == WMI_REQUEST_VDEV_STAT) {
if (list_empty(&stats->vdevs)) {
ath12k_warn(ab, "empty vdev stats");
return;
}
/* FW sends all the active VDEV stats irrespective of PDEV,
* hence limit until the count of all VDEVs started
*/
rcu_read_lock();
for (i = 0; i < ab->num_radios; i++) {
pdev = rcu_dereference(ab->pdevs_active[i]);
if (pdev && pdev->ar)
total_vdevs_started += pdev->ar->num_started_vdevs;
}
rcu_read_unlock();
is_end = ((++num_vdev) == total_vdevs_started);
list_splice_tail_init(&stats->vdevs,
&ar->fw_stats.vdevs);
if (is_end) {
ar->fw_stats.fw_stats_done = true;
num_vdev = 0;
}
return;
}
if (stats->stats_id == WMI_REQUEST_BCN_STAT) {
if (list_empty(&stats->bcn)) {
ath12k_warn(ab, "empty beacon stats");
return;
}
/* Mark end until we reached the count of all started VDEVs
* within the PDEV
*/
is_end = ((++num_bcn) == ar->num_started_vdevs);
list_splice_tail_init(&stats->bcn,
&ar->fw_stats.bcn);
if (is_end) {
ar->fw_stats.fw_stats_done = true;
num_bcn = 0;
}
}
}
static int ath12k_open_vdev_stats(struct inode *inode, struct file *file)
{
struct ath12k *ar = inode->i_private;

View File

@ -12,8 +12,6 @@ void ath12k_debugfs_soc_create(struct ath12k_base *ab);
void ath12k_debugfs_soc_destroy(struct ath12k_base *ab);
void ath12k_debugfs_register(struct ath12k *ar);
void ath12k_debugfs_unregister(struct ath12k *ar);
void ath12k_debugfs_fw_stats_process(struct ath12k *ar,
struct ath12k_fw_stats *stats);
void ath12k_debugfs_op_vif_add(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
void ath12k_debugfs_pdev_create(struct ath12k_base *ab);
@ -126,11 +124,6 @@ static inline void ath12k_debugfs_unregister(struct ath12k *ar)
{
}
static inline void ath12k_debugfs_fw_stats_process(struct ath12k *ar,
struct ath12k_fw_stats *stats)
{
}
static inline bool ath12k_debugfs_is_extd_rx_stats_enabled(struct ath12k *ar)
{
return false;

View File

@ -4360,7 +4360,7 @@ int ath12k_mac_get_fw_stats(struct ath12k *ar,
{
struct ath12k_base *ab = ar->ab;
struct ath12k_hw *ah = ath12k_ar_to_ah(ar);
unsigned long timeout, time_left;
unsigned long time_left;
int ret;
guard(mutex)(&ah->hw_mutex);
@ -4368,19 +4368,13 @@ int ath12k_mac_get_fw_stats(struct ath12k *ar,
if (ah->state != ATH12K_HW_STATE_ON)
return -ENETDOWN;
/* FW stats can get split when exceeding the stats data buffer limit.
* In that case, since there is no end marking for the back-to-back
* received 'update stats' event, we keep a 3 seconds timeout in case,
* fw_stats_done is not marked yet
*/
timeout = jiffies + msecs_to_jiffies(3 * 1000);
ath12k_fw_stats_reset(ar);
reinit_completion(&ar->fw_stats_complete);
reinit_completion(&ar->fw_stats_done);
ret = ath12k_wmi_send_stats_request_cmd(ar, param->stats_id,
param->vdev_id, param->pdev_id);
if (ret) {
ath12k_warn(ab, "failed to request fw stats: %d\n", ret);
return ret;
@ -4391,7 +4385,6 @@ int ath12k_mac_get_fw_stats(struct ath12k *ar,
param->pdev_id, param->vdev_id, param->stats_id);
time_left = wait_for_completion_timeout(&ar->fw_stats_complete, 1 * HZ);
if (!time_left) {
ath12k_warn(ab, "time out while waiting for get fw stats\n");
return -ETIMEDOUT;
@ -4400,20 +4393,15 @@ int ath12k_mac_get_fw_stats(struct ath12k *ar,
/* Firmware sends WMI_UPDATE_STATS_EVENTID back-to-back
* when stats data buffer limit is reached. fw_stats_complete
* is completed once host receives first event from firmware, but
* still end might not be marked in the TLV.
* Below loop is to confirm that firmware completed sending all the event
* and fw_stats_done is marked true when end is marked in the TLV.
* still there could be more events following. Below is to wait
* until firmware completes sending all the events.
*/
for (;;) {
if (time_after(jiffies, timeout))
break;
spin_lock_bh(&ar->data_lock);
if (ar->fw_stats.fw_stats_done) {
spin_unlock_bh(&ar->data_lock);
break;
}
spin_unlock_bh(&ar->data_lock);
time_left = wait_for_completion_timeout(&ar->fw_stats_done, 3 * HZ);
if (!time_left) {
ath12k_warn(ab, "time out while waiting for fw stats done\n");
return -ETIMEDOUT;
}
return 0;
}
@ -5890,6 +5878,327 @@ exit:
return ret;
}
static bool ath12k_mac_is_freq_on_mac(struct ath12k_hw_mode_freq_range_arg *freq_range,
u32 freq, u8 mac_id)
{
return (freq >= freq_range[mac_id].low_2ghz_freq &&
freq <= freq_range[mac_id].high_2ghz_freq) ||
(freq >= freq_range[mac_id].low_5ghz_freq &&
freq <= freq_range[mac_id].high_5ghz_freq);
}
static bool
ath12k_mac_2_freq_same_mac_in_freq_range(struct ath12k_base *ab,
struct ath12k_hw_mode_freq_range_arg *freq_range,
u32 freq_link1, u32 freq_link2)
{
u8 i;
for (i = 0; i < MAX_RADIOS; i++) {
if (ath12k_mac_is_freq_on_mac(freq_range, freq_link1, i) &&
ath12k_mac_is_freq_on_mac(freq_range, freq_link2, i))
return true;
}
return false;
}
static bool ath12k_mac_is_hw_dbs_capable(struct ath12k_base *ab)
{
return test_bit(WMI_TLV_SERVICE_DUAL_BAND_SIMULTANEOUS_SUPPORT,
ab->wmi_ab.svc_map) &&
ab->wmi_ab.hw_mode_info.support_dbs;
}
static bool ath12k_mac_2_freq_same_mac_in_dbs(struct ath12k_base *ab,
u32 freq_link1, u32 freq_link2)
{
struct ath12k_hw_mode_freq_range_arg *freq_range;
if (!ath12k_mac_is_hw_dbs_capable(ab))
return true;
freq_range = ab->wmi_ab.hw_mode_info.freq_range_caps[ATH12K_HW_MODE_DBS];
return ath12k_mac_2_freq_same_mac_in_freq_range(ab, freq_range,
freq_link1, freq_link2);
}
static bool ath12k_mac_is_hw_sbs_capable(struct ath12k_base *ab)
{
return test_bit(WMI_TLV_SERVICE_DUAL_BAND_SIMULTANEOUS_SUPPORT,
ab->wmi_ab.svc_map) &&
ab->wmi_ab.hw_mode_info.support_sbs;
}
static bool ath12k_mac_2_freq_same_mac_in_sbs(struct ath12k_base *ab,
u32 freq_link1, u32 freq_link2)
{
struct ath12k_hw_mode_info *info = &ab->wmi_ab.hw_mode_info;
struct ath12k_hw_mode_freq_range_arg *sbs_uppr_share;
struct ath12k_hw_mode_freq_range_arg *sbs_low_share;
struct ath12k_hw_mode_freq_range_arg *sbs_range;
if (!ath12k_mac_is_hw_sbs_capable(ab))
return true;
if (ab->wmi_ab.sbs_lower_band_end_freq) {
sbs_uppr_share = info->freq_range_caps[ATH12K_HW_MODE_SBS_UPPER_SHARE];
sbs_low_share = info->freq_range_caps[ATH12K_HW_MODE_SBS_LOWER_SHARE];
return ath12k_mac_2_freq_same_mac_in_freq_range(ab, sbs_low_share,
freq_link1, freq_link2) ||
ath12k_mac_2_freq_same_mac_in_freq_range(ab, sbs_uppr_share,
freq_link1, freq_link2);
}
sbs_range = info->freq_range_caps[ATH12K_HW_MODE_SBS];
return ath12k_mac_2_freq_same_mac_in_freq_range(ab, sbs_range,
freq_link1, freq_link2);
}
static bool ath12k_mac_freqs_on_same_mac(struct ath12k_base *ab,
u32 freq_link1, u32 freq_link2)
{
return ath12k_mac_2_freq_same_mac_in_dbs(ab, freq_link1, freq_link2) &&
ath12k_mac_2_freq_same_mac_in_sbs(ab, freq_link1, freq_link2);
}
static int ath12k_mac_mlo_sta_set_link_active(struct ath12k_base *ab,
enum wmi_mlo_link_force_reason reason,
enum wmi_mlo_link_force_mode mode,
u8 *mlo_vdev_id_lst,
u8 num_mlo_vdev,
u8 *mlo_inactive_vdev_lst,
u8 num_mlo_inactive_vdev)
{
struct wmi_mlo_link_set_active_arg param = {0};
u32 entry_idx, entry_offset, vdev_idx;
u8 vdev_id;
param.reason = reason;
param.force_mode = mode;
for (vdev_idx = 0; vdev_idx < num_mlo_vdev; vdev_idx++) {
vdev_id = mlo_vdev_id_lst[vdev_idx];
entry_idx = vdev_id / 32;
entry_offset = vdev_id % 32;
if (entry_idx >= WMI_MLO_LINK_NUM_SZ) {
ath12k_warn(ab, "Invalid entry_idx %d num_mlo_vdev %d vdev %d",
entry_idx, num_mlo_vdev, vdev_id);
return -EINVAL;
}
param.vdev_bitmap[entry_idx] |= 1 << entry_offset;
/* update entry number if entry index changed */
if (param.num_vdev_bitmap < entry_idx + 1)
param.num_vdev_bitmap = entry_idx + 1;
}
ath12k_dbg(ab, ATH12K_DBG_MAC,
"num_vdev_bitmap %d vdev_bitmap[0] = 0x%x, vdev_bitmap[1] = 0x%x",
param.num_vdev_bitmap, param.vdev_bitmap[0], param.vdev_bitmap[1]);
if (mode == WMI_MLO_LINK_FORCE_MODE_ACTIVE_INACTIVE) {
for (vdev_idx = 0; vdev_idx < num_mlo_inactive_vdev; vdev_idx++) {
vdev_id = mlo_inactive_vdev_lst[vdev_idx];
entry_idx = vdev_id / 32;
entry_offset = vdev_id % 32;
if (entry_idx >= WMI_MLO_LINK_NUM_SZ) {
ath12k_warn(ab, "Invalid entry_idx %d num_mlo_vdev %d vdev %d",
entry_idx, num_mlo_inactive_vdev, vdev_id);
return -EINVAL;
}
param.inactive_vdev_bitmap[entry_idx] |= 1 << entry_offset;
/* update entry number if entry index changed */
if (param.num_inactive_vdev_bitmap < entry_idx + 1)
param.num_inactive_vdev_bitmap = entry_idx + 1;
}
ath12k_dbg(ab, ATH12K_DBG_MAC,
"num_vdev_bitmap %d inactive_vdev_bitmap[0] = 0x%x, inactive_vdev_bitmap[1] = 0x%x",
param.num_inactive_vdev_bitmap,
param.inactive_vdev_bitmap[0],
param.inactive_vdev_bitmap[1]);
}
if (mode == WMI_MLO_LINK_FORCE_MODE_ACTIVE_LINK_NUM ||
mode == WMI_MLO_LINK_FORCE_MODE_INACTIVE_LINK_NUM) {
param.num_link_entry = 1;
param.link_num[0].num_of_link = num_mlo_vdev - 1;
}
return ath12k_wmi_send_mlo_link_set_active_cmd(ab, &param);
}
static int ath12k_mac_mlo_sta_update_link_active(struct ath12k_base *ab,
struct ieee80211_hw *hw,
struct ath12k_vif *ahvif)
{
u8 mlo_vdev_id_lst[IEEE80211_MLD_MAX_NUM_LINKS] = {0};
u32 mlo_freq_list[IEEE80211_MLD_MAX_NUM_LINKS] = {0};
unsigned long links = ahvif->links_map;
enum wmi_mlo_link_force_reason reason;
struct ieee80211_chanctx_conf *conf;
enum wmi_mlo_link_force_mode mode;
struct ieee80211_bss_conf *info;
struct ath12k_link_vif *arvif;
u8 num_mlo_vdev = 0;
u8 link_id;
for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
/* make sure vdev is created on this device */
if (!arvif || !arvif->is_created || arvif->ar->ab != ab)
continue;
info = ath12k_mac_get_link_bss_conf(arvif);
conf = wiphy_dereference(hw->wiphy, info->chanctx_conf);
mlo_freq_list[num_mlo_vdev] = conf->def.chan->center_freq;
mlo_vdev_id_lst[num_mlo_vdev] = arvif->vdev_id;
num_mlo_vdev++;
}
/* It is not allowed to activate more links than a single device
* supported. Something goes wrong if we reach here.
*/
if (num_mlo_vdev > ATH12K_NUM_MAX_ACTIVE_LINKS_PER_DEVICE) {
WARN_ON_ONCE(1);
return -EINVAL;
}
/* if 2 links are established and both link channels fall on the
* same hardware MAC, send command to firmware to deactivate one
* of them.
*/
if (num_mlo_vdev == 2 &&
ath12k_mac_freqs_on_same_mac(ab, mlo_freq_list[0],
mlo_freq_list[1])) {
mode = WMI_MLO_LINK_FORCE_MODE_INACTIVE_LINK_NUM;
reason = WMI_MLO_LINK_FORCE_REASON_NEW_CONNECT;
return ath12k_mac_mlo_sta_set_link_active(ab, reason, mode,
mlo_vdev_id_lst, num_mlo_vdev,
NULL, 0);
}
return 0;
}
static bool ath12k_mac_are_sbs_chan(struct ath12k_base *ab, u32 freq_1, u32 freq_2)
{
if (!ath12k_mac_is_hw_sbs_capable(ab))
return false;
if (ath12k_is_2ghz_channel_freq(freq_1) ||
ath12k_is_2ghz_channel_freq(freq_2))
return false;
return !ath12k_mac_2_freq_same_mac_in_sbs(ab, freq_1, freq_2);
}
static bool ath12k_mac_are_dbs_chan(struct ath12k_base *ab, u32 freq_1, u32 freq_2)
{
if (!ath12k_mac_is_hw_dbs_capable(ab))
return false;
return !ath12k_mac_2_freq_same_mac_in_dbs(ab, freq_1, freq_2);
}
static int ath12k_mac_select_links(struct ath12k_base *ab,
struct ieee80211_vif *vif,
struct ieee80211_hw *hw,
u16 *selected_links)
{
unsigned long useful_links = ieee80211_vif_usable_links(vif);
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
u8 num_useful_links = hweight_long(useful_links);
struct ieee80211_chanctx_conf *chanctx;
struct ath12k_link_vif *assoc_arvif;
u32 assoc_link_freq, partner_freq;
u16 sbs_links = 0, dbs_links = 0;
struct ieee80211_bss_conf *info;
struct ieee80211_channel *chan;
struct ieee80211_sta *sta;
struct ath12k_sta *ahsta;
u8 link_id;
/* activate all useful links if less than max supported */
if (num_useful_links <= ATH12K_NUM_MAX_ACTIVE_LINKS_PER_DEVICE) {
*selected_links = useful_links;
return 0;
}
/* only in station mode we can get here, so it's safe
* to use ap_addr
*/
rcu_read_lock();
sta = ieee80211_find_sta(vif, vif->cfg.ap_addr);
if (!sta) {
rcu_read_unlock();
ath12k_warn(ab, "failed to find sta with addr %pM\n", vif->cfg.ap_addr);
return -EINVAL;
}
ahsta = ath12k_sta_to_ahsta(sta);
assoc_arvif = wiphy_dereference(hw->wiphy, ahvif->link[ahsta->assoc_link_id]);
info = ath12k_mac_get_link_bss_conf(assoc_arvif);
chanctx = rcu_dereference(info->chanctx_conf);
assoc_link_freq = chanctx->def.chan->center_freq;
rcu_read_unlock();
ath12k_dbg(ab, ATH12K_DBG_MAC, "assoc link %u freq %u\n",
assoc_arvif->link_id, assoc_link_freq);
/* assoc link is already activated and has to be kept active,
* only need to select a partner link from others.
*/
useful_links &= ~BIT(assoc_arvif->link_id);
for_each_set_bit(link_id, &useful_links, IEEE80211_MLD_MAX_NUM_LINKS) {
info = wiphy_dereference(hw->wiphy, vif->link_conf[link_id]);
if (!info) {
ath12k_warn(ab, "failed to get link info for link: %u\n",
link_id);
return -ENOLINK;
}
chan = info->chanreq.oper.chan;
if (!chan) {
ath12k_warn(ab, "failed to get chan for link: %u\n", link_id);
return -EINVAL;
}
partner_freq = chan->center_freq;
if (ath12k_mac_are_sbs_chan(ab, assoc_link_freq, partner_freq)) {
sbs_links |= BIT(link_id);
ath12k_dbg(ab, ATH12K_DBG_MAC, "new SBS link %u freq %u\n",
link_id, partner_freq);
continue;
}
if (ath12k_mac_are_dbs_chan(ab, assoc_link_freq, partner_freq)) {
dbs_links |= BIT(link_id);
ath12k_dbg(ab, ATH12K_DBG_MAC, "new DBS link %u freq %u\n",
link_id, partner_freq);
continue;
}
ath12k_dbg(ab, ATH12K_DBG_MAC, "non DBS/SBS link %u freq %u\n",
link_id, partner_freq);
}
/* choose the first candidate no matter how many is in the list */
if (sbs_links)
link_id = __ffs(sbs_links);
else if (dbs_links)
link_id = __ffs(dbs_links);
else
link_id = ffs(useful_links) - 1;
ath12k_dbg(ab, ATH12K_DBG_MAC, "select partner link %u\n", link_id);
*selected_links = BIT(assoc_arvif->link_id) | BIT(link_id);
return 0;
}
static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@ -5899,10 +6208,13 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k_base *prev_ab = NULL, *ab;
struct ath12k_link_vif *arvif;
struct ath12k_link_sta *arsta;
unsigned long valid_links;
u8 link_id = 0;
u16 selected_links = 0;
u8 link_id = 0, i;
struct ath12k *ar;
int ret;
lockdep_assert_wiphy(hw->wiphy);
@ -5972,8 +6284,24 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
* about to move to the associated state.
*/
if (ieee80211_vif_is_mld(vif) && vif->type == NL80211_IFTYPE_STATION &&
old_state == IEEE80211_STA_AUTH && new_state == IEEE80211_STA_ASSOC)
ieee80211_set_active_links(vif, ieee80211_vif_usable_links(vif));
old_state == IEEE80211_STA_AUTH && new_state == IEEE80211_STA_ASSOC) {
/* TODO: for now only do link selection for single device
* MLO case. Other cases would be handled in the future.
*/
ab = ah->radio[0].ab;
if (ab->ag->num_devices == 1) {
ret = ath12k_mac_select_links(ab, vif, hw, &selected_links);
if (ret) {
ath12k_warn(ab,
"failed to get selected links: %d\n", ret);
goto exit;
}
} else {
selected_links = ieee80211_vif_usable_links(vif);
}
ieee80211_set_active_links(vif, selected_links);
}
/* Handle all the other state transitions in generic way */
valid_links = ahsta->links_map;
@ -5997,6 +6325,24 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
}
}
if (ieee80211_vif_is_mld(vif) && vif->type == NL80211_IFTYPE_STATION &&
old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTHORIZED) {
for_each_ar(ah, ar, i) {
ab = ar->ab;
if (prev_ab == ab)
continue;
ret = ath12k_mac_mlo_sta_update_link_active(ab, hw, ahvif);
if (ret) {
ath12k_warn(ab,
"failed to update link active state on connect %d\n",
ret);
goto exit;
}
prev_ab = ab;
}
}
/* IEEE80211_STA_NONE -> IEEE80211_STA_NOTEXIST:
* Remove the station from driver (handle ML sta here since that
* needs special handling. Normal sta will be handled in generic

View File

@ -54,6 +54,8 @@ struct ath12k_generic_iter {
#define ATH12K_DEFAULT_SCAN_LINK IEEE80211_MLD_MAX_NUM_LINKS
#define ATH12K_NUM_MAX_LINKS (IEEE80211_MLD_MAX_NUM_LINKS + 1)
#define ATH12K_NUM_MAX_ACTIVE_LINKS_PER_DEVICE 2
enum ath12k_supported_bw {
ATH12K_BW_20 = 0,
ATH12K_BW_40 = 1,

View File

@ -91,6 +91,11 @@ struct ath12k_wmi_svc_rdy_ext2_parse {
bool dma_ring_cap_done;
bool spectral_bin_scaling_done;
bool mac_phy_caps_ext_done;
bool hal_reg_caps_ext2_done;
bool scan_radio_caps_ext2_done;
bool twt_caps_done;
bool htt_msdu_idx_to_qtype_map_done;
bool dbs_or_sbs_cap_ext_done;
};
struct ath12k_wmi_rdy_parse {
@ -4395,6 +4400,7 @@ static int ath12k_wmi_hw_mode_caps_parse(struct ath12k_base *soc,
static int ath12k_wmi_hw_mode_caps(struct ath12k_base *soc,
u16 len, const void *ptr, void *data)
{
struct ath12k_svc_ext_info *svc_ext_info = &soc->wmi_ab.svc_ext_info;
struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps;
enum wmi_host_hw_mode_config_type mode, pref;
@ -4427,8 +4433,11 @@ static int ath12k_wmi_hw_mode_caps(struct ath12k_base *soc,
}
}
ath12k_dbg(soc, ATH12K_DBG_WMI, "preferred_hw_mode:%d\n",
soc->wmi_ab.preferred_hw_mode);
svc_ext_info->num_hw_modes = svc_rdy_ext->n_hw_mode_caps;
ath12k_dbg(soc, ATH12K_DBG_WMI, "num hw modes %u preferred_hw_mode %d\n",
svc_ext_info->num_hw_modes, soc->wmi_ab.preferred_hw_mode);
if (soc->wmi_ab.preferred_hw_mode == WMI_HOST_HW_MODE_MAX)
return -EINVAL;
@ -4658,6 +4667,65 @@ free_dir_buff:
return ret;
}
static void
ath12k_wmi_save_mac_phy_info(struct ath12k_base *ab,
const struct ath12k_wmi_mac_phy_caps_params *mac_phy_cap,
struct ath12k_svc_ext_mac_phy_info *mac_phy_info)
{
mac_phy_info->phy_id = __le32_to_cpu(mac_phy_cap->phy_id);
mac_phy_info->supported_bands = __le32_to_cpu(mac_phy_cap->supported_bands);
mac_phy_info->hw_freq_range.low_2ghz_freq =
__le32_to_cpu(mac_phy_cap->low_2ghz_chan_freq);
mac_phy_info->hw_freq_range.high_2ghz_freq =
__le32_to_cpu(mac_phy_cap->high_2ghz_chan_freq);
mac_phy_info->hw_freq_range.low_5ghz_freq =
__le32_to_cpu(mac_phy_cap->low_5ghz_chan_freq);
mac_phy_info->hw_freq_range.high_5ghz_freq =
__le32_to_cpu(mac_phy_cap->high_5ghz_chan_freq);
}
static void
ath12k_wmi_save_all_mac_phy_info(struct ath12k_base *ab,
struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext)
{
struct ath12k_svc_ext_info *svc_ext_info = &ab->wmi_ab.svc_ext_info;
const struct ath12k_wmi_mac_phy_caps_params *mac_phy_cap;
const struct ath12k_wmi_hw_mode_cap_params *hw_mode_cap;
struct ath12k_svc_ext_mac_phy_info *mac_phy_info;
u32 hw_mode_id, phy_bit_map;
u8 hw_idx;
mac_phy_info = &svc_ext_info->mac_phy_info[0];
mac_phy_cap = svc_rdy_ext->mac_phy_caps;
for (hw_idx = 0; hw_idx < svc_ext_info->num_hw_modes; hw_idx++) {
hw_mode_cap = &svc_rdy_ext->hw_mode_caps[hw_idx];
hw_mode_id = __le32_to_cpu(hw_mode_cap->hw_mode_id);
phy_bit_map = __le32_to_cpu(hw_mode_cap->phy_id_map);
while (phy_bit_map) {
ath12k_wmi_save_mac_phy_info(ab, mac_phy_cap, mac_phy_info);
mac_phy_info->hw_mode_config_type =
le32_get_bits(hw_mode_cap->hw_mode_config_type,
WMI_HW_MODE_CAP_CFG_TYPE);
ath12k_dbg(ab, ATH12K_DBG_WMI,
"hw_idx %u hw_mode_id %u hw_mode_config_type %u supported_bands %u phy_id %u 2 GHz [%u - %u] 5 GHz [%u - %u]\n",
hw_idx, hw_mode_id,
mac_phy_info->hw_mode_config_type,
mac_phy_info->supported_bands, mac_phy_info->phy_id,
mac_phy_info->hw_freq_range.low_2ghz_freq,
mac_phy_info->hw_freq_range.high_2ghz_freq,
mac_phy_info->hw_freq_range.low_5ghz_freq,
mac_phy_info->hw_freq_range.high_5ghz_freq);
mac_phy_cap++;
mac_phy_info++;
phy_bit_map >>= 1;
}
}
}
static int ath12k_wmi_svc_rdy_ext_parse(struct ath12k_base *ab,
u16 tag, u16 len,
const void *ptr, void *data)
@ -4706,6 +4774,8 @@ static int ath12k_wmi_svc_rdy_ext_parse(struct ath12k_base *ab,
return ret;
}
ath12k_wmi_save_all_mac_phy_info(ab, svc_rdy_ext);
svc_rdy_ext->mac_phy_done = true;
} else if (!svc_rdy_ext->ext_hal_reg_done) {
ret = ath12k_wmi_ext_hal_reg_caps(ab, len, ptr, svc_rdy_ext);
@ -4922,10 +4992,449 @@ static int ath12k_wmi_tlv_mac_phy_caps_ext(struct ath12k_base *ab, u16 tag,
return 0;
}
static void
ath12k_wmi_update_freq_info(struct ath12k_base *ab,
struct ath12k_svc_ext_mac_phy_info *mac_cap,
enum ath12k_hw_mode mode,
u32 phy_id)
{
struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
struct ath12k_hw_mode_freq_range_arg *mac_range;
mac_range = &hw_mode_info->freq_range_caps[mode][phy_id];
if (mac_cap->supported_bands & WMI_HOST_WLAN_2GHZ_CAP) {
mac_range->low_2ghz_freq = max_t(u32,
mac_cap->hw_freq_range.low_2ghz_freq,
ATH12K_MIN_2GHZ_FREQ);
mac_range->high_2ghz_freq = mac_cap->hw_freq_range.high_2ghz_freq ?
min_t(u32,
mac_cap->hw_freq_range.high_2ghz_freq,
ATH12K_MAX_2GHZ_FREQ) :
ATH12K_MAX_2GHZ_FREQ;
}
if (mac_cap->supported_bands & WMI_HOST_WLAN_5GHZ_CAP) {
mac_range->low_5ghz_freq = max_t(u32,
mac_cap->hw_freq_range.low_5ghz_freq,
ATH12K_MIN_5GHZ_FREQ);
mac_range->high_5ghz_freq = mac_cap->hw_freq_range.high_5ghz_freq ?
min_t(u32,
mac_cap->hw_freq_range.high_5ghz_freq,
ATH12K_MAX_6GHZ_FREQ) :
ATH12K_MAX_6GHZ_FREQ;
}
}
static bool
ath12k_wmi_all_phy_range_updated(struct ath12k_base *ab,
enum ath12k_hw_mode hwmode)
{
struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
struct ath12k_hw_mode_freq_range_arg *mac_range;
u8 phy_id;
for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
mac_range = &hw_mode_info->freq_range_caps[hwmode][phy_id];
/* modify SBS/DBS range only when both phy for DBS are filled */
if (!mac_range->low_2ghz_freq && !mac_range->low_5ghz_freq)
return false;
}
return true;
}
static void ath12k_wmi_update_dbs_freq_info(struct ath12k_base *ab)
{
struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
struct ath12k_hw_mode_freq_range_arg *mac_range;
u8 phy_id;
mac_range = hw_mode_info->freq_range_caps[ATH12K_HW_MODE_DBS];
/* Reset 5 GHz range for shared mac for DBS */
for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
if (mac_range[phy_id].low_2ghz_freq &&
mac_range[phy_id].low_5ghz_freq) {
mac_range[phy_id].low_5ghz_freq = 0;
mac_range[phy_id].high_5ghz_freq = 0;
}
}
}
static u32
ath12k_wmi_get_highest_5ghz_freq_from_range(struct ath12k_hw_mode_freq_range_arg *range)
{
u32 highest_freq = 0;
u8 phy_id;
for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
if (range[phy_id].high_5ghz_freq > highest_freq)
highest_freq = range[phy_id].high_5ghz_freq;
}
return highest_freq ? highest_freq : ATH12K_MAX_6GHZ_FREQ;
}
static u32
ath12k_wmi_get_lowest_5ghz_freq_from_range(struct ath12k_hw_mode_freq_range_arg *range)
{
u32 lowest_freq = 0;
u8 phy_id;
for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
if ((!lowest_freq && range[phy_id].low_5ghz_freq) ||
range[phy_id].low_5ghz_freq < lowest_freq)
lowest_freq = range[phy_id].low_5ghz_freq;
}
return lowest_freq ? lowest_freq : ATH12K_MIN_5GHZ_FREQ;
}
static void
ath12k_wmi_fill_upper_share_sbs_freq(struct ath12k_base *ab,
u16 sbs_range_sep,
struct ath12k_hw_mode_freq_range_arg *ref_freq)
{
struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
struct ath12k_hw_mode_freq_range_arg *upper_sbs_freq_range;
u8 phy_id;
upper_sbs_freq_range =
hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS_UPPER_SHARE];
for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
upper_sbs_freq_range[phy_id].low_2ghz_freq =
ref_freq[phy_id].low_2ghz_freq;
upper_sbs_freq_range[phy_id].high_2ghz_freq =
ref_freq[phy_id].high_2ghz_freq;
/* update for shared mac */
if (upper_sbs_freq_range[phy_id].low_2ghz_freq) {
upper_sbs_freq_range[phy_id].low_5ghz_freq = sbs_range_sep + 10;
upper_sbs_freq_range[phy_id].high_5ghz_freq =
ath12k_wmi_get_highest_5ghz_freq_from_range(ref_freq);
} else {
upper_sbs_freq_range[phy_id].low_5ghz_freq =
ath12k_wmi_get_lowest_5ghz_freq_from_range(ref_freq);
upper_sbs_freq_range[phy_id].high_5ghz_freq = sbs_range_sep;
}
}
}
static void
ath12k_wmi_fill_lower_share_sbs_freq(struct ath12k_base *ab,
u16 sbs_range_sep,
struct ath12k_hw_mode_freq_range_arg *ref_freq)
{
struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
struct ath12k_hw_mode_freq_range_arg *lower_sbs_freq_range;
u8 phy_id;
lower_sbs_freq_range =
hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS_LOWER_SHARE];
for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
lower_sbs_freq_range[phy_id].low_2ghz_freq =
ref_freq[phy_id].low_2ghz_freq;
lower_sbs_freq_range[phy_id].high_2ghz_freq =
ref_freq[phy_id].high_2ghz_freq;
/* update for shared mac */
if (lower_sbs_freq_range[phy_id].low_2ghz_freq) {
lower_sbs_freq_range[phy_id].low_5ghz_freq =
ath12k_wmi_get_lowest_5ghz_freq_from_range(ref_freq);
lower_sbs_freq_range[phy_id].high_5ghz_freq = sbs_range_sep;
} else {
lower_sbs_freq_range[phy_id].low_5ghz_freq = sbs_range_sep + 10;
lower_sbs_freq_range[phy_id].high_5ghz_freq =
ath12k_wmi_get_highest_5ghz_freq_from_range(ref_freq);
}
}
}
static const char *ath12k_wmi_hw_mode_to_str(enum ath12k_hw_mode hw_mode)
{
static const char * const mode_str[] = {
[ATH12K_HW_MODE_SMM] = "SMM",
[ATH12K_HW_MODE_DBS] = "DBS",
[ATH12K_HW_MODE_SBS] = "SBS",
[ATH12K_HW_MODE_SBS_UPPER_SHARE] = "SBS_UPPER_SHARE",
[ATH12K_HW_MODE_SBS_LOWER_SHARE] = "SBS_LOWER_SHARE",
};
if (hw_mode >= ARRAY_SIZE(mode_str))
return "Unknown";
return mode_str[hw_mode];
}
static void
ath12k_wmi_dump_freq_range_per_mac(struct ath12k_base *ab,
struct ath12k_hw_mode_freq_range_arg *freq_range,
enum ath12k_hw_mode hw_mode)
{
u8 i;
for (i = 0; i < MAX_RADIOS; i++)
if (freq_range[i].low_2ghz_freq || freq_range[i].low_5ghz_freq)
ath12k_dbg(ab, ATH12K_DBG_WMI,
"frequency range: %s(%d) mac %d 2 GHz [%d - %d] 5 GHz [%d - %d]",
ath12k_wmi_hw_mode_to_str(hw_mode),
hw_mode, i,
freq_range[i].low_2ghz_freq,
freq_range[i].high_2ghz_freq,
freq_range[i].low_5ghz_freq,
freq_range[i].high_5ghz_freq);
}
static void ath12k_wmi_dump_freq_range(struct ath12k_base *ab)
{
struct ath12k_hw_mode_freq_range_arg *freq_range;
u8 i;
for (i = ATH12K_HW_MODE_SMM; i < ATH12K_HW_MODE_MAX; i++) {
freq_range = ab->wmi_ab.hw_mode_info.freq_range_caps[i];
ath12k_wmi_dump_freq_range_per_mac(ab, freq_range, i);
}
}
/* Trim the SBS 5 GHz range of the non-shared MAC so it does not overlap
 * the shared MAC's range.
 *
 * When firmware does not provide an explicit lower-band cutoff, it reports
 * one "shared" MAC carrying both 2.4 and 5 GHz and one "non-shared" MAC
 * carrying 5 GHz only. This helper adjusts the latter against the former.
 *
 * Return: 0 on success or when @phy_id is the shared MAC (nothing to do),
 * -EINVAL on an inconsistent range layout.
 */
static int ath12k_wmi_modify_sbs_freq(struct ath12k_base *ab, u8 phy_id)
{
	struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
	struct ath12k_hw_mode_freq_range_arg *sbs_mac_range, *shared_mac_range;
	struct ath12k_hw_mode_freq_range_arg *non_shared_range;
	u8 shared_phy_id;

	sbs_mac_range = &hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS][phy_id];

	/* if SBS mac range has both 2.4 and 5 GHz ranges, i.e. shared phy_id
	 * keep the range as it is in SBS
	 */
	if (sbs_mac_range->low_2ghz_freq && sbs_mac_range->low_5ghz_freq)
		return 0;

	/* A 2.4 GHz-only MAC is invalid here: SBS is defined by splitting
	 * the 5 GHz band between the two MACs.
	 */
	if (sbs_mac_range->low_2ghz_freq && !sbs_mac_range->low_5ghz_freq) {
		ath12k_err(ab, "Invalid DBS/SBS mode with only 2.4Ghz");
		ath12k_wmi_dump_freq_range_per_mac(ab, sbs_mac_range, ATH12K_HW_MODE_SBS);
		return -EINVAL;
	}

	non_shared_range = sbs_mac_range;

	/* if SBS mac range has only 5 GHz then it's the non-shared phy, so
	 * modify the range as per the shared mac.
	 */
	shared_phy_id = phy_id ? 0 : 1;	/* only two MACs; the other is shared */
	shared_mac_range =
		&hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS][shared_phy_id];

	if (shared_mac_range->low_5ghz_freq > non_shared_range->low_5ghz_freq) {
		ath12k_dbg(ab, ATH12K_DBG_WMI, "high 5 GHz shared");
		/* If the shared mac lower 5 GHz frequency is greater than
		 * non-shared mac lower 5 GHz frequency then the shared mac has
		 * high 5 GHz shared with 2.4 GHz. So non-shared mac's 5 GHz high
		 * freq should be less than the shared mac's low 5 GHz freq.
		 */
		if (non_shared_range->high_5ghz_freq >=
		    shared_mac_range->low_5ghz_freq)
			/* back off by 10 below the shared low bound, but never
			 * below the non-shared low bound
			 */
			non_shared_range->high_5ghz_freq =
				max_t(u32, shared_mac_range->low_5ghz_freq - 10,
				      non_shared_range->low_5ghz_freq);
	} else if (shared_mac_range->high_5ghz_freq <
		   non_shared_range->high_5ghz_freq) {
		ath12k_dbg(ab, ATH12K_DBG_WMI, "low 5 GHz shared");
		/* If the shared mac high 5 GHz frequency is less than
		 * non-shared mac high 5 GHz frequency then the shared mac has
		 * low 5 GHz shared with 2.4 GHz. So non-shared mac's 5 GHz low
		 * freq should be greater than the shared mac's high 5 GHz freq.
		 */
		if (shared_mac_range->high_5ghz_freq >=
		    non_shared_range->low_5ghz_freq)
			/* raise by 10 above the shared high bound, but never
			 * above the non-shared high bound
			 */
			non_shared_range->low_5ghz_freq =
				min_t(u32, shared_mac_range->high_5ghz_freq + 10,
				      non_shared_range->high_5ghz_freq);
	} else {
		/* the two ranges fully coincide: no valid split exists */
		ath12k_warn(ab, "invalid SBS range with all 5 GHz shared");
		return -EINVAL;
	}

	return 0;
}
/* Finalize the SBS frequency ranges once every PHY has reported. */
static void ath12k_wmi_update_sbs_freq_info(struct ath12k_base *ab)
{
	struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
	struct ath12k_hw_mode_freq_range_arg *sbs_range;
	u16 lower_band_end;
	u8 phy_id;

	sbs_range = hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS];

	/* If sbs_lower_band_end_freq has a value, then the frequency range
	 * will be split using that value.
	 */
	lower_band_end = ab->wmi_ab.sbs_lower_band_end_freq;
	if (lower_band_end) {
		ath12k_wmi_fill_upper_share_sbs_freq(ab, lower_band_end,
						     sbs_range);
		ath12k_wmi_fill_lower_share_sbs_freq(ab, lower_band_end,
						     sbs_range);
		/* Hardware specifies the range boundary with the cutoff
		 * (i.e. the boundary between 5 GHz high and 5 GHz low), so
		 * reset the original one to make sure it will not get used.
		 */
		memset(sbs_range, 0, sizeof(*sbs_range) * MAX_RADIOS);
		return;
	}

	/* No cutoff set: firmware sends one shared mac range and one
	 * non-shared mac range, so fix up each PHY accordingly.
	 */
	for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
		if (ath12k_wmi_modify_sbs_freq(ab, phy_id)) {
			/* inconsistent layout: invalidate all SBS ranges */
			memset(sbs_range, 0, sizeof(*sbs_range) * MAX_RADIOS);
			return;
		}
	}
}
/* Record the frequency range reported for one MAC into the per-hw-mode
 * table, and derive the combined DBS/SBS ranges once all PHYs of the
 * mode have been seen.
 */
static void
ath12k_wmi_update_mac_freq_info(struct ath12k_base *ab,
				enum wmi_host_hw_mode_config_type hw_config_type,
				u32 phy_id,
				struct ath12k_svc_ext_mac_phy_info *mac_cap)
{
	if (phy_id >= MAX_RADIOS) {
		ath12k_err(ab, "mac more than two not supported: %d", phy_id);
		return;
	}

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "hw_mode_cfg %d mac %d band 0x%x SBS cutoff freq %d 2 GHz [%d - %d] 5 GHz [%d - %d]",
		   hw_config_type, phy_id, mac_cap->supported_bands,
		   ab->wmi_ab.sbs_lower_band_end_freq,
		   mac_cap->hw_freq_range.low_2ghz_freq,
		   mac_cap->hw_freq_range.high_2ghz_freq,
		   mac_cap->hw_freq_range.low_5ghz_freq,
		   mac_cap->hw_freq_range.high_5ghz_freq);

	switch (hw_config_type) {
	case WMI_HOST_HW_MODE_SINGLE:
		/* single-MAC mode: only phy 0 carries a meaningful range */
		if (phy_id) {
			ath12k_dbg(ab, ATH12K_DBG_WMI, "mac phy 1 is not supported");
			break;
		}
		ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_SMM, phy_id);
		break;

	case WMI_HOST_HW_MODE_DBS:
		/* fill DBS entries only until both PHYs have been recorded */
		if (!ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_DBS))
			ath12k_wmi_update_freq_info(ab, mac_cap,
						    ATH12K_HW_MODE_DBS, phy_id);
		break;
	case WMI_HOST_HW_MODE_DBS_SBS:
	case WMI_HOST_HW_MODE_DBS_OR_SBS:
		ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_DBS, phy_id);
		/* also record as SBS when a lower-band cutoff exists or the
		 * MAC reported any usable range at all
		 */
		if (ab->wmi_ab.sbs_lower_band_end_freq ||
		    mac_cap->hw_freq_range.low_5ghz_freq ||
		    mac_cap->hw_freq_range.low_2ghz_freq)
			ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_SBS,
						    phy_id);

		/* once every PHY has reported, derive the adjusted ranges */
		if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_DBS))
			ath12k_wmi_update_dbs_freq_info(ab);
		if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS))
			ath12k_wmi_update_sbs_freq_info(ab);
		break;
	case WMI_HOST_HW_MODE_SBS:
	case WMI_HOST_HW_MODE_SBS_PASSIVE:
		ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_SBS, phy_id);
		if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS))
			ath12k_wmi_update_sbs_freq_info(ab);
		break;
	default:
		break;
	}
}
/* True when usable SBS frequency ranges exist: either the plain SBS table
 * is fully populated, or a lower-band cutoff was given and both the
 * lower-share and upper-share tables are fully populated.
 */
static bool ath12k_wmi_sbs_range_present(struct ath12k_base *ab)
{
	if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS))
		return true;

	return ab->wmi_ab.sbs_lower_band_end_freq &&
	       ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS_LOWER_SHARE) &&
	       ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS_UPPER_SHARE);
}
/* Build the host hw mode info (frequency ranges plus DBS/SBS support
 * flags) from the MAC/PHY capabilities cached during service-ready-ext
 * parsing.
 *
 * Dual-MAC modes (DBS/SBS variants) consume two consecutive mac_phy_info
 * entries; single-MAC modes consume one.
 *
 * Return: 0 on success, -EINVAL on malformed capability data.
 */
static int ath12k_wmi_update_hw_mode_list(struct ath12k_base *ab)
{
	struct ath12k_svc_ext_info *svc_ext_info = &ab->wmi_ab.svc_ext_info;
	struct ath12k_hw_mode_info *info = &ab->wmi_ab.hw_mode_info;
	enum wmi_host_hw_mode_config_type hw_config_type;
	struct ath12k_svc_ext_mac_phy_info *tmp;
	bool dbs_mode = false, sbs_mode = false;
	u32 i, j = 0;	/* j walks mac_phy_info[] across all modes */

	if (!svc_ext_info->num_hw_modes) {
		ath12k_err(ab, "invalid number of hw modes");
		return -EINVAL;
	}

	ath12k_dbg(ab, ATH12K_DBG_WMI, "updated HW mode list: num modes %d",
		   svc_ext_info->num_hw_modes);

	memset(info->freq_range_caps, 0, sizeof(info->freq_range_caps));

	for (i = 0; i < svc_ext_info->num_hw_modes; i++) {
		/* bounds check before consuming the next cap entry */
		if (j >= ATH12K_MAX_MAC_PHY_CAP)
			return -EINVAL;

		/* Update for MAC0 */
		tmp = &svc_ext_info->mac_phy_info[j++];
		hw_config_type = tmp->hw_mode_config_type;
		ath12k_wmi_update_mac_freq_info(ab, hw_config_type, tmp->phy_id, tmp);

		/* SBS and DBS have dual MAC. Up to 2 MACs are considered. */
		if (hw_config_type == WMI_HOST_HW_MODE_DBS ||
		    hw_config_type == WMI_HOST_HW_MODE_SBS_PASSIVE ||
		    hw_config_type == WMI_HOST_HW_MODE_SBS ||
		    hw_config_type == WMI_HOST_HW_MODE_DBS_OR_SBS) {
			if (j >= ATH12K_MAX_MAC_PHY_CAP)
				return -EINVAL;

			/* Update for MAC1 */
			tmp = &svc_ext_info->mac_phy_info[j++];
			ath12k_wmi_update_mac_freq_info(ab, hw_config_type,
							tmp->phy_id, tmp);

			if (hw_config_type == WMI_HOST_HW_MODE_DBS ||
			    hw_config_type == WMI_HOST_HW_MODE_DBS_OR_SBS)
				dbs_mode = true;

			/* SBS counts only if a usable SBS range was built */
			if (ath12k_wmi_sbs_range_present(ab) &&
			    (hw_config_type == WMI_HOST_HW_MODE_SBS_PASSIVE ||
			     hw_config_type == WMI_HOST_HW_MODE_SBS ||
			     hw_config_type == WMI_HOST_HW_MODE_DBS_OR_SBS))
				sbs_mode = true;
		}
	}

	info->support_dbs = dbs_mode;
	info->support_sbs = sbs_mode;

	ath12k_wmi_dump_freq_range(ab);

	return 0;
}
static int ath12k_wmi_svc_rdy_ext2_parse(struct ath12k_base *ab,
u16 tag, u16 len,
const void *ptr, void *data)
{
const struct ath12k_wmi_dbs_or_sbs_cap_params *dbs_or_sbs_caps;
struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
struct ath12k_wmi_svc_rdy_ext2_parse *parse = data;
int ret;
@ -4967,7 +5476,32 @@ static int ath12k_wmi_svc_rdy_ext2_parse(struct ath12k_base *ab,
}
parse->mac_phy_caps_ext_done = true;
} else if (!parse->hal_reg_caps_ext2_done) {
parse->hal_reg_caps_ext2_done = true;
} else if (!parse->scan_radio_caps_ext2_done) {
parse->scan_radio_caps_ext2_done = true;
} else if (!parse->twt_caps_done) {
parse->twt_caps_done = true;
} else if (!parse->htt_msdu_idx_to_qtype_map_done) {
parse->htt_msdu_idx_to_qtype_map_done = true;
} else if (!parse->dbs_or_sbs_cap_ext_done) {
dbs_or_sbs_caps = ptr;
ab->wmi_ab.sbs_lower_band_end_freq =
__le32_to_cpu(dbs_or_sbs_caps->sbs_lower_band_end_freq);
ath12k_dbg(ab, ATH12K_DBG_WMI, "sbs_lower_band_end_freq %u\n",
ab->wmi_ab.sbs_lower_band_end_freq);
ret = ath12k_wmi_update_hw_mode_list(ab);
if (ret) {
ath12k_warn(ab, "failed to update hw mode list: %d\n",
ret);
return ret;
}
parse->dbs_or_sbs_cap_ext_done = true;
}
break;
default:
break;
@ -7626,6 +8160,64 @@ static int ath12k_wmi_pull_fw_stats(struct ath12k_base *ab, struct sk_buff *skb,
&parse);
}
/* Accumulate one received VDEV or beacon stats chunk on ar->fw_stats.
 *
 * Firmware may deliver a stats request's results across several events;
 * ar->fw_stats_done is completed only once the expected number of
 * per-vdev records has been received.
 */
static void ath12k_wmi_fw_stats_process(struct ath12k *ar,
					struct ath12k_fw_stats *stats)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_pdev *pdev;
	bool is_end = true;
	size_t total_vdevs_started = 0;
	int i;

	if (stats->stats_id == WMI_REQUEST_VDEV_STAT) {
		if (list_empty(&stats->vdevs)) {
			ath12k_warn(ab, "empty vdev stats");
			return;
		}
		/* FW sends all the active VDEV stats irrespective of PDEV,
		 * hence limit until the count of all VDEVs started
		 */
		rcu_read_lock();
		for (i = 0; i < ab->num_radios; i++) {
			pdev = rcu_dereference(ab->pdevs_active[i]);
			if (pdev && pdev->ar)
				total_vdevs_started += pdev->ar->num_started_vdevs;
		}
		rcu_read_unlock();

		/* done once the cumulative received count matches the total */
		if (total_vdevs_started)
			is_end = ((++ar->fw_stats.num_vdev_recvd) ==
				  total_vdevs_started);

		list_splice_tail_init(&stats->vdevs,
				      &ar->fw_stats.vdevs);

		if (is_end)
			complete(&ar->fw_stats_done);

		return;
	}

	if (stats->stats_id == WMI_REQUEST_BCN_STAT) {
		if (list_empty(&stats->bcn)) {
			ath12k_warn(ab, "empty beacon stats");
			return;
		}
		/* Mark end until we reached the count of all started VDEVs
		 * within the PDEV
		 */
		if (ar->num_started_vdevs)
			is_end = ((++ar->fw_stats.num_bcn_recvd) ==
				  ar->num_started_vdevs);

		list_splice_tail_init(&stats->bcn,
				      &ar->fw_stats.bcn);

		if (is_end)
			complete(&ar->fw_stats_done);
	}
}
static void ath12k_update_stats_event(struct ath12k_base *ab, struct sk_buff *skb)
{
struct ath12k_fw_stats stats = {};
@ -7655,19 +8247,15 @@ static void ath12k_update_stats_event(struct ath12k_base *ab, struct sk_buff *sk
spin_lock_bh(&ar->data_lock);
/* WMI_REQUEST_PDEV_STAT can be requested via .get_txpower mac ops or via
* debugfs fw stats. Therefore, processing it separately.
*/
/* Handle WMI_REQUEST_PDEV_STAT status update */
if (stats.stats_id == WMI_REQUEST_PDEV_STAT) {
list_splice_tail_init(&stats.pdevs, &ar->fw_stats.pdevs);
ar->fw_stats.fw_stats_done = true;
complete(&ar->fw_stats_done);
goto complete;
}
/* WMI_REQUEST_VDEV_STAT and WMI_REQUEST_BCN_STAT are currently requested only
* via debugfs fw stats. Hence, processing these in debugfs context.
*/
ath12k_debugfs_fw_stats_process(ar, &stats);
/* Handle WMI_REQUEST_VDEV_STAT and WMI_REQUEST_BCN_STAT updates. */
ath12k_wmi_fw_stats_process(ar, &stats);
complete:
complete(&ar->fw_stats_complete);
@ -9911,3 +10499,224 @@ int ath12k_wmi_send_vdev_set_tpc_power(struct ath12k *ar,
return 0;
}
/* Serialize the disallowed-MLO-mode combinations from @arg into the TLV
 * entries at @dislw_bmap. Returns 0 on success, -EINVAL if the claimed
 * combination count exceeds the arg array capacity.
 */
static int
ath12k_wmi_fill_disallowed_bmap(struct ath12k_base *ab,
				struct wmi_disallowed_mlo_mode_bitmap_params *dislw_bmap,
				struct wmi_mlo_link_set_active_arg *arg)
{
	struct wmi_ml_disallow_mode_bmap_arg *entry = &arg->disallow_bmap[0];
	u8 idx;

	if (arg->num_disallow_mode_comb >
	    ARRAY_SIZE(arg->disallow_bmap)) {
		ath12k_warn(ab, "invalid num_disallow_mode_comb: %d",
			    arg->num_disallow_mode_comb);
		return -EINVAL;
	}

	for (idx = 0; idx < arg->num_disallow_mode_comb;
	     idx++, entry++, dislw_bmap++) {
		dislw_bmap->tlv_header =
			ath12k_wmi_tlv_cmd_hdr(0, sizeof(*dislw_bmap));
		dislw_bmap->disallowed_mode_bitmap =
			cpu_to_le32(entry->disallowed_mode);
		/* pack the four IEEE link ids into one little-endian word */
		dislw_bmap->ieee_link_id_comb =
			le32_encode_bits(entry->ieee_link_id[0],
					 WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_1) |
			le32_encode_bits(entry->ieee_link_id[1],
					 WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_2) |
			le32_encode_bits(entry->ieee_link_id[2],
					 WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_3) |
			le32_encode_bits(entry->ieee_link_id[3],
					 WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_4);

		ath12k_dbg(ab, ATH12K_DBG_WMI,
			   "entry %d disallowed_mode %d ieee_link_id_comb 0x%x",
			   idx, entry->disallowed_mode,
			   entry->ieee_link_id_comb);
	}

	return 0;
}
/* Send WMI_MLO_LINK_SET_ACTIVE_CMDID to force MLO links (in)active.
 *
 * TLV layout (order is fixed; firmware expects every TLV header even for
 * empty arrays):
 *   fixed cmd struct,
 *   link-number entries (STRUCT array),
 *   vdev id bitmap (UINT32 array),
 *   inactive vdev id bitmap (UINT32 array; empty unless ACTIVE_INACTIVE),
 *   two empty ieee_link_id bitmaps (UINT32 arrays),
 *   disallowed-mode combinations (STRUCT array).
 *
 * Return: 0 on success, negative errno on validation/allocation/send
 * failure.
 */
int ath12k_wmi_send_mlo_link_set_active_cmd(struct ath12k_base *ab,
					    struct wmi_mlo_link_set_active_arg *arg)
{
	struct wmi_disallowed_mlo_mode_bitmap_params *disallowed_mode_bmap;
	struct wmi_mlo_set_active_link_number_params *link_num_param;
	u32 num_link_num_param = 0, num_vdev_bitmap = 0;
	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
	struct wmi_mlo_link_set_active_cmd *cmd;
	u32 num_inactive_vdev_bitmap = 0;
	u32 num_disallow_mode_comb = 0;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	__le32 *vdev_bitmap;
	void *buf_ptr;
	int i, ret;
	u32 len;

	if (!arg->num_vdev_bitmap && !arg->num_link_entry) {
		ath12k_warn(ab, "Invalid num_vdev_bitmap and num_link_entry");
		return -EINVAL;
	}

	/* determine which optional arrays the chosen force mode carries */
	switch (arg->force_mode) {
	case WMI_MLO_LINK_FORCE_MODE_ACTIVE_LINK_NUM:
	case WMI_MLO_LINK_FORCE_MODE_INACTIVE_LINK_NUM:
		num_link_num_param = arg->num_link_entry;
		fallthrough;
	case WMI_MLO_LINK_FORCE_MODE_ACTIVE:
	case WMI_MLO_LINK_FORCE_MODE_INACTIVE:
	case WMI_MLO_LINK_FORCE_MODE_NO_FORCE:
		num_vdev_bitmap = arg->num_vdev_bitmap;
		break;
	case WMI_MLO_LINK_FORCE_MODE_ACTIVE_INACTIVE:
		num_vdev_bitmap = arg->num_vdev_bitmap;
		num_inactive_vdev_bitmap = arg->num_inactive_vdev_bitmap;
		break;
	default:
		ath12k_warn(ab, "Invalid force mode: %u", arg->force_mode);
		return -EINVAL;
	}

	num_disallow_mode_comb = arg->num_disallow_mode_comb;

	/* fixed cmd + every TLV header + all variable payloads */
	len = sizeof(*cmd) +
	      TLV_HDR_SIZE + sizeof(*link_num_param) * num_link_num_param +
	      TLV_HDR_SIZE + sizeof(*vdev_bitmap) * num_vdev_bitmap +
	      TLV_HDR_SIZE + TLV_HDR_SIZE + TLV_HDR_SIZE +
	      TLV_HDR_SIZE + sizeof(*disallowed_mode_bmap) * num_disallow_mode_comb;
	if (arg->force_mode == WMI_MLO_LINK_FORCE_MODE_ACTIVE_INACTIVE)
		len += sizeof(*vdev_bitmap) * num_inactive_vdev_bitmap;

	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_mlo_link_set_active_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_LINK_SET_ACTIVE_CMD,
						 sizeof(*cmd));
	cmd->force_mode = cpu_to_le32(arg->force_mode);
	cmd->reason = cpu_to_le32(arg->reason);
	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "mode %d reason %d num_link_num_param %d num_vdev_bitmap %d inactive %d num_disallow_mode_comb %d",
		   arg->force_mode, arg->reason, num_link_num_param,
		   num_vdev_bitmap, num_inactive_vdev_bitmap,
		   num_disallow_mode_comb);

	buf_ptr = skb->data + sizeof(*cmd);

	/* TLV: active-link-number entries (possibly empty) */
	tlv = buf_ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
					 sizeof(*link_num_param) * num_link_num_param);
	buf_ptr += TLV_HDR_SIZE;

	if (num_link_num_param) {
		cmd->ctrl_flags =
			le32_encode_bits(arg->ctrl_flags.dync_force_link_num ? 1 : 0,
					 CRTL_F_DYNC_FORCE_LINK_NUM);

		link_num_param = buf_ptr;
		for (i = 0; i < num_link_num_param; i++) {
			link_num_param->tlv_header =
				ath12k_wmi_tlv_cmd_hdr(0, sizeof(*link_num_param));
			link_num_param->num_of_link =
				cpu_to_le32(arg->link_num[i].num_of_link);
			link_num_param->vdev_type =
				cpu_to_le32(arg->link_num[i].vdev_type);
			link_num_param->vdev_subtype =
				cpu_to_le32(arg->link_num[i].vdev_subtype);
			link_num_param->home_freq =
				cpu_to_le32(arg->link_num[i].home_freq);
			ath12k_dbg(ab, ATH12K_DBG_WMI,
				   "entry %d num_of_link %d vdev type %d subtype %d freq %d control_flags %d",
				   i, arg->link_num[i].num_of_link,
				   arg->link_num[i].vdev_type,
				   arg->link_num[i].vdev_subtype,
				   arg->link_num[i].home_freq,
				   __le32_to_cpu(cmd->ctrl_flags));
			link_num_param++;
		}

		buf_ptr += sizeof(*link_num_param) * num_link_num_param;
	}

	/* TLV: vdev id bitmap */
	tlv = buf_ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32,
					 sizeof(*vdev_bitmap) * num_vdev_bitmap);
	buf_ptr += TLV_HDR_SIZE;

	if (num_vdev_bitmap) {
		vdev_bitmap = buf_ptr;
		for (i = 0; i < num_vdev_bitmap; i++) {
			vdev_bitmap[i] = cpu_to_le32(arg->vdev_bitmap[i]);
			ath12k_dbg(ab, ATH12K_DBG_WMI, "entry %d vdev_id_bitmap 0x%x",
				   i, arg->vdev_bitmap[i]);
		}

		buf_ptr += sizeof(*vdev_bitmap) * num_vdev_bitmap;
	}

	/* TLV: inactive vdev id bitmap (ACTIVE_INACTIVE mode only) */
	if (arg->force_mode == WMI_MLO_LINK_FORCE_MODE_ACTIVE_INACTIVE) {
		tlv = buf_ptr;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32,
						 sizeof(*vdev_bitmap) *
						 num_inactive_vdev_bitmap);
		buf_ptr += TLV_HDR_SIZE;

		if (num_inactive_vdev_bitmap) {
			vdev_bitmap = buf_ptr;
			for (i = 0; i < num_inactive_vdev_bitmap; i++) {
				vdev_bitmap[i] =
					cpu_to_le32(arg->inactive_vdev_bitmap[i]);
				ath12k_dbg(ab, ATH12K_DBG_WMI,
					   "entry %d inactive_vdev_id_bitmap 0x%x",
					   i, arg->inactive_vdev_bitmap[i]);
			}

			buf_ptr += sizeof(*vdev_bitmap) * num_inactive_vdev_bitmap;
		}
	} else {
		/* add empty vdev bitmap2 tlv */
		tlv = buf_ptr;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
		buf_ptr += TLV_HDR_SIZE;
	}

	/* add empty ieee_link_id_bitmap tlv */
	tlv = buf_ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
	buf_ptr += TLV_HDR_SIZE;

	/* add empty ieee_link_id_bitmap2 tlv */
	tlv = buf_ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
	buf_ptr += TLV_HDR_SIZE;

	/* TLV: disallowed-mode bitmap combinations */
	tlv = buf_ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
					 sizeof(*disallowed_mode_bmap) *
					 arg->num_disallow_mode_comb);
	buf_ptr += TLV_HDR_SIZE;
	ret = ath12k_wmi_fill_disallowed_bmap(ab, buf_ptr, arg);
	if (ret)
		goto free_skb;

	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0], skb, WMI_MLO_LINK_SET_ACTIVE_CMDID);
	if (ret) {
		ath12k_warn(ab,
			    "failed to send WMI_MLO_LINK_SET_ACTIVE_CMDID: %d\n", ret);
		goto free_skb;
	}

	ath12k_dbg(ab, ATH12K_DBG_WMI, "WMI mlo link set active cmd");

	return ret;

free_skb:
	/* skb ownership passes to ath12k_wmi_cmd_send only on success */
	dev_kfree_skb(skb);
	return ret;
}

View File

@ -1974,6 +1974,7 @@ enum wmi_tlv_tag {
WMI_TAG_TPC_STATS_CTL_PWR_TABLE_EVENT,
WMI_TAG_VDEV_SET_TPC_POWER_CMD = 0x3B5,
WMI_TAG_VDEV_CH_POWER_INFO,
WMI_TAG_MLO_LINK_SET_ACTIVE_CMD = 0x3BE,
WMI_TAG_EHT_RATE_SET = 0x3C4,
WMI_TAG_DCS_AWGN_INT_TYPE = 0x3C5,
WMI_TAG_MLO_TX_SEND_PARAMS,
@ -2617,6 +2618,8 @@ struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params {
__le32 num_chainmask_tables;
} __packed;
#define WMI_HW_MODE_CAP_CFG_TYPE GENMASK(27, 0)
struct ath12k_wmi_hw_mode_cap_params {
__le32 tlv_header;
__le32 hw_mode_id;
@ -2666,6 +2669,12 @@ struct ath12k_wmi_mac_phy_caps_params {
__le32 he_cap_info_2g_ext;
__le32 he_cap_info_5g_ext;
__le32 he_cap_info_internal;
__le32 wireless_modes;
__le32 low_2ghz_chan_freq;
__le32 high_2ghz_chan_freq;
__le32 low_5ghz_chan_freq;
__le32 high_5ghz_chan_freq;
__le32 nss_ratio;
} __packed;
struct ath12k_wmi_hal_reg_caps_ext_params {
@ -2739,6 +2748,11 @@ struct wmi_service_ready_ext2_event {
__le32 default_num_msduq_supported_per_tid;
} __packed;
/* DBS/SBS capability TLV from the SERVICE_READY_EXT2 event; carries the
 * frequency at which the SBS lower 5 GHz band ends for @hw_mode_id.
 */
struct ath12k_wmi_dbs_or_sbs_cap_params {
	__le32 hw_mode_id;
	__le32 sbs_lower_band_end_freq;
} __packed;
struct ath12k_wmi_caps_ext_params {
__le32 hw_mode_id;
__le32 pdev_and_hw_link_ids;
@ -5049,6 +5063,53 @@ struct ath12k_wmi_pdev {
u32 rx_decap_mode;
};
/* Host-order frequency range of one MAC in a given hw mode; a zero
 * low_*_freq means that band is absent on the MAC (this is how
 * ath12k_wmi_dump_freq_range_per_mac() detects unused entries).
 */
struct ath12k_hw_mode_freq_range_arg {
	u32 low_2ghz_freq;	/* lowest supported 2.4 GHz frequency */
	u32 high_2ghz_freq;	/* highest supported 2.4 GHz frequency */
	u32 low_5ghz_freq;	/* lowest supported 5 GHz frequency */
	u32 high_5ghz_freq;	/* highest supported 5 GHz frequency */
};
/* Per-MAC capability cached from the service-ready-ext event, later
 * consumed by ath12k_wmi_update_hw_mode_list().
 */
struct ath12k_svc_ext_mac_phy_info {
	enum wmi_host_hw_mode_config_type hw_mode_config_type;	/* hw mode this entry belongs to */
	u32 phy_id;
	u32 supported_bands;	/* logged as hex; presumably a band bitmap — verify against FW docs */
	struct ath12k_hw_mode_freq_range_arg hw_freq_range;	/* raw range as reported by FW */
};
#define ATH12K_MAX_MAC_PHY_CAP 8
struct ath12k_svc_ext_info {
u32 num_hw_modes;
struct ath12k_svc_ext_mac_phy_info mac_phy_info[ATH12K_MAX_MAC_PHY_CAP];
};
/**
* enum ath12k_hw_mode - enum for host mode
* @ATH12K_HW_MODE_SMM: Single mac mode
* @ATH12K_HW_MODE_DBS: DBS mode
* @ATH12K_HW_MODE_SBS: SBS mode with either high share or low share
* @ATH12K_HW_MODE_SBS_UPPER_SHARE: Higher 5 GHz shared with 2.4 GHz
* @ATH12K_HW_MODE_SBS_LOWER_SHARE: Lower 5 GHz shared with 2.4 GHz
* @ATH12K_HW_MODE_MAX: Max, used to indicate invalid mode
*/
enum ath12k_hw_mode {
ATH12K_HW_MODE_SMM,
ATH12K_HW_MODE_DBS,
ATH12K_HW_MODE_SBS,
ATH12K_HW_MODE_SBS_UPPER_SHARE,
ATH12K_HW_MODE_SBS_LOWER_SHARE,
ATH12K_HW_MODE_MAX,
};
/* Derived hw mode support flags plus the per-mode/per-MAC frequency
 * ranges, built by ath12k_wmi_update_hw_mode_list().
 */
struct ath12k_hw_mode_info {
	bool support_dbs:1;	/* a DBS-capable hw mode was reported */
	bool support_sbs:1;	/* an SBS-capable hw mode with usable ranges was reported */

	struct ath12k_hw_mode_freq_range_arg freq_range_caps[ATH12K_HW_MODE_MAX]
					[MAX_RADIOS];
};
struct ath12k_wmi_base {
struct ath12k_base *ab;
struct ath12k_wmi_pdev wmi[MAX_RADIOS];
@ -5066,6 +5127,10 @@ struct ath12k_wmi_base {
enum wmi_host_hw_mode_config_type preferred_hw_mode;
struct ath12k_wmi_target_cap_arg *targ_cap;
struct ath12k_svc_ext_info svc_ext_info;
u32 sbs_lower_band_end_freq;
struct ath12k_hw_mode_info hw_mode_info;
};
struct wmi_pdev_set_bios_interface_cmd {
@ -5997,6 +6062,118 @@ struct wmi_vdev_set_tpc_power_cmd {
*/
} __packed;
#define CRTL_F_DYNC_FORCE_LINK_NUM GENMASK(3, 2)
/* Fixed (non-TLV-array) portion of WMI_MLO_LINK_SET_ACTIVE_CMDID, followed
 * on the wire by the TLV arrays built in
 * ath12k_wmi_send_mlo_link_set_active_cmd().
 */
struct wmi_mlo_link_set_active_cmd {
	__le32 tlv_header;
	__le32 force_mode;	/* enum wmi_mlo_link_force_mode */
	__le32 reason;		/* enum wmi_mlo_link_force_reason */
	__le32 use_ieee_link_id_bitmap;
	struct ath12k_wmi_mac_addr_params ap_mld_mac_addr;
	__le32 ctrl_flags;	/* CRTL_F_* bits */
} __packed;
struct wmi_mlo_set_active_link_number_params {
__le32 tlv_header;
__le32 num_of_link;
__le32 vdev_type;
__le32 vdev_subtype;
__le32 home_freq;
} __packed;
#define WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_1 GENMASK(7, 0)
#define WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_2 GENMASK(15, 8)
#define WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_3 GENMASK(23, 16)
#define WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_4 GENMASK(31, 24)
/* One disallowed MLO mode combination TLV entry; ieee_link_id_comb packs
 * four link ids using the WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_*
 * bit fields (see ath12k_wmi_fill_disallowed_bmap()).
 */
struct wmi_disallowed_mlo_mode_bitmap_params {
	__le32 tlv_header;
	__le32 disallowed_mode_bitmap;
	__le32 ieee_link_id_comb;
} __packed;
enum wmi_mlo_link_force_mode {
WMI_MLO_LINK_FORCE_MODE_ACTIVE = 1,
WMI_MLO_LINK_FORCE_MODE_INACTIVE = 2,
WMI_MLO_LINK_FORCE_MODE_ACTIVE_LINK_NUM = 3,
WMI_MLO_LINK_FORCE_MODE_INACTIVE_LINK_NUM = 4,
WMI_MLO_LINK_FORCE_MODE_NO_FORCE = 5,
WMI_MLO_LINK_FORCE_MODE_ACTIVE_INACTIVE = 6,
WMI_MLO_LINK_FORCE_MODE_NON_FORCE_UPDATE = 7,
};
enum wmi_mlo_link_force_reason {
WMI_MLO_LINK_FORCE_REASON_NEW_CONNECT = 1,
WMI_MLO_LINK_FORCE_REASON_NEW_DISCONNECT = 2,
WMI_MLO_LINK_FORCE_REASON_LINK_REMOVAL = 3,
WMI_MLO_LINK_FORCE_REASON_TDLS = 4,
WMI_MLO_LINK_FORCE_REASON_REVERT_FAILURE = 5,
WMI_MLO_LINK_FORCE_REASON_LINK_DELETE = 6,
WMI_MLO_LINK_FORCE_REASON_SINGLE_LINK_EMLSR_OP = 7,
};
struct wmi_mlo_link_num_arg {
u32 num_of_link;
u32 vdev_type;
u32 vdev_subtype;
u32 home_freq;
};
struct wmi_mlo_control_flags_arg {
bool overwrite_force_active_bitmap;
bool overwrite_force_inactive_bitmap;
bool dync_force_link_num;
bool post_re_evaluate;
u8 post_re_evaluate_loops;
bool dont_reschedule_workqueue;
};
struct wmi_ml_link_force_cmd_arg {
u8 ap_mld_mac_addr[ETH_ALEN];
u16 ieee_link_id_bitmap;
u16 ieee_link_id_bitmap2;
u8 link_num;
};
struct wmi_ml_disallow_mode_bmap_arg {
u32 disallowed_mode;
union {
u32 ieee_link_id_comb;
u8 ieee_link_id[4];
};
};
/* maximum size of link number param array
* for MLO link set active command
*/
#define WMI_MLO_LINK_NUM_SZ 2
/* maximum size of vdev bitmap array for
* MLO link set active command
*/
#define WMI_MLO_VDEV_BITMAP_SZ 2
/* Max number of disallowed bitmap combination
* sent to firmware
*/
#define WMI_ML_MAX_DISALLOW_BMAP_COMB 4
/* Host-side argument block for ath12k_wmi_send_mlo_link_set_active_cmd();
 * the num_* fields bound the valid portion of the fixed-size arrays below.
 */
struct wmi_mlo_link_set_active_arg {
	enum wmi_mlo_link_force_mode force_mode;	/* which force operation to perform */
	enum wmi_mlo_link_force_reason reason;		/* why the operation is requested */
	u32 num_link_entry;		/* valid entries in link_num[] */
	u32 num_vdev_bitmap;		/* valid words in vdev_bitmap[] */
	u32 num_inactive_vdev_bitmap;	/* valid words in inactive_vdev_bitmap[] */
	struct wmi_mlo_link_num_arg link_num[WMI_MLO_LINK_NUM_SZ];
	u32 vdev_bitmap[WMI_MLO_VDEV_BITMAP_SZ];
	u32 inactive_vdev_bitmap[WMI_MLO_VDEV_BITMAP_SZ];	/* used only in ACTIVE_INACTIVE mode */
	struct wmi_mlo_control_flags_arg ctrl_flags;
	bool use_ieee_link_id;
	struct wmi_ml_link_force_cmd_arg force_cmd;
	u32 num_disallow_mode_comb;	/* valid entries in disallow_bmap[] */
	struct wmi_ml_disallow_mode_bmap_arg disallow_bmap[WMI_ML_MAX_DISALLOW_BMAP_COMB];
};
void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
struct ath12k_wmi_resource_config_arg *config);
void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
@ -6195,5 +6372,6 @@ bool ath12k_wmi_supports_6ghz_cc_ext(struct ath12k *ar);
int ath12k_wmi_send_vdev_set_tpc_power(struct ath12k *ar,
u32 vdev_id,
struct ath12k_reg_tpc_power_info *param);
int ath12k_wmi_send_mlo_link_set_active_cmd(struct ath12k_base *ab,
struct wmi_mlo_link_set_active_arg *param);
#endif

View File

@ -87,7 +87,9 @@ int ath6kl_bmi_get_target_info(struct ath6kl *ar,
* We need to do some backwards compatibility to make this work.
*/
if (le32_to_cpu(targ_info->byte_count) != sizeof(*targ_info)) {
WARN_ON(1);
ath6kl_err("mismatched byte count %d vs. expected %zd\n",
le32_to_cpu(targ_info->byte_count),
sizeof(*targ_info));
return -EINVAL;
}

View File

@ -438,14 +438,21 @@ static void carl9170_usb_rx_complete(struct urb *urb)
if (atomic_read(&ar->rx_anch_urbs) == 0) {
/*
* The system is too slow to cope with
* the enormous workload. We have simply
* run out of active rx urbs and this
* unfortunately leads to an unpredictable
* device.
* At this point, either the system is too slow to
* cope with the enormous workload (so we have simply
* run out of active rx urbs and this unfortunately
* leads to an unpredictable device), or the device
* is not fully functional after an unsuccessful
* firmware loading attempts (so it doesn't pass
* ieee80211_register_hw() and there is no internal
* workqueue at all).
*/
ieee80211_queue_work(ar->hw, &ar->ping_work);
if (ar->registered)
ieee80211_queue_work(ar->hw, &ar->ping_work);
else
pr_warn_once("device %s is not registered\n",
dev_name(&ar->udev->dev));
}
} else {
/*

View File

@ -1316,6 +1316,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
sizeof(trans->conf.no_reclaim_cmds));
memcpy(trans->conf.no_reclaim_cmds, no_reclaim_cmds,
sizeof(no_reclaim_cmds));
trans->conf.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
switch (iwlwifi_mod_params.amsdu_size) {
case IWL_AMSDU_DEF:

View File

@ -77,6 +77,7 @@ void iwl_construct_mld(struct iwl_mld *mld, struct iwl_trans *trans,
/* Setup async RX handling */
spin_lock_init(&mld->async_handlers_lock);
INIT_LIST_HEAD(&mld->async_handlers_list);
wiphy_work_init(&mld->async_handlers_wk,
iwl_mld_async_handlers_wk);

View File

@ -34,7 +34,7 @@ static void iwl_mvm_mld_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
WIDE_ID(MAC_CONF_GROUP,
MAC_CONFIG_CMD), 0);
if (WARN_ON(cmd_ver < 1 && cmd_ver > 3))
if (WARN_ON(cmd_ver < 1 || cmd_ver > 3))
return;
cmd->id_and_color = cpu_to_le32(mvmvif->id);

View File

@ -166,7 +166,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_context_info *ctxt_info;
struct iwl_context_info_rbd_cfg *rx_cfg;
u32 control_flags = 0, rb_size;
u32 control_flags = 0, rb_size, cb_size;
dma_addr_t phys;
int ret;
@ -202,11 +202,12 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
rb_size = IWL_CTXT_INFO_RB_SIZE_4K;
}
WARN_ON(RX_QUEUE_CB_SIZE(iwl_trans_get_num_rbds(trans)) > 12);
cb_size = RX_QUEUE_CB_SIZE(iwl_trans_get_num_rbds(trans));
if (WARN_ON(cb_size > 12))
cb_size = 12;
control_flags = IWL_CTXT_INFO_TFD_FORMAT_LONG;
control_flags |=
u32_encode_bits(RX_QUEUE_CB_SIZE(iwl_trans_get_num_rbds(trans)),
IWL_CTXT_INFO_RB_CB_SIZE);
control_flags |= u32_encode_bits(cb_size, IWL_CTXT_INFO_RB_CB_SIZE);
control_flags |= u32_encode_bits(rb_size, IWL_CTXT_INFO_RB_SIZE);
ctxt_info->control.control_flags = cpu_to_le32(control_flags);

View File

@ -1278,7 +1278,7 @@ struct ieee80211_ext {
u8 sa[ETH_ALEN];
__le32 timestamp;
u8 change_seq;
u8 variable[0];
u8 variable[];
} __packed s1g_beacon;
} u;
} __packed __aligned(2);
@ -1536,7 +1536,7 @@ struct ieee80211_mgmt {
u8 action_code;
u8 dialog_token;
__le16 capability;
u8 variable[0];
u8 variable[];
} __packed tdls_discover_resp;
struct {
u8 action_code;
@ -1721,35 +1721,35 @@ struct ieee80211_tdls_data {
struct {
u8 dialog_token;
__le16 capability;
u8 variable[0];
u8 variable[];
} __packed setup_req;
struct {
__le16 status_code;
u8 dialog_token;
__le16 capability;
u8 variable[0];
u8 variable[];
} __packed setup_resp;
struct {
__le16 status_code;
u8 dialog_token;
u8 variable[0];
u8 variable[];
} __packed setup_cfm;
struct {
__le16 reason_code;
u8 variable[0];
u8 variable[];
} __packed teardown;
struct {
u8 dialog_token;
u8 variable[0];
u8 variable[];
} __packed discover_req;
struct {
u8 target_channel;
u8 oper_class;
u8 variable[0];
u8 variable[];
} __packed chan_switch_req;
struct {
__le16 status_code;
u8 variable[0];
u8 variable[];
} __packed chan_switch_resp;
} u;
} __packed;

View File

@ -1,10 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Portions
* Copyright (C) 2022 - 2024 Intel Corporation
* Copyright (C) 2022 - 2025 Intel Corporation
*/
#ifndef __MAC80211_DEBUG_H
#define __MAC80211_DEBUG_H
#include <linux/once_lite.h>
#include <net/cfg80211.h>
#ifdef CONFIG_MAC80211_OCB_DEBUG
@ -152,6 +153,8 @@ do { \
else \
_sdata_err((link)->sdata, fmt, ##__VA_ARGS__); \
} while (0)
#define link_err_once(link, fmt, ...) \
DO_ONCE_LITE(link_err, link, fmt, ##__VA_ARGS__)
#define link_id_info(sdata, link_id, fmt, ...) \
do { \
if (ieee80211_vif_is_mld(&sdata->vif)) \

View File

@ -4432,6 +4432,10 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
if (!multicast &&
!ether_addr_equal(sdata->dev->dev_addr, hdr->addr1))
return false;
/* reject invalid/our STA address */
if (!is_valid_ether_addr(hdr->addr2) ||
ether_addr_equal(sdata->dev->dev_addr, hdr->addr2))
return false;
if (!rx->sta) {
int rate_idx;
if (status->encoding != RX_ENC_LEGACY)

View File

@ -5,7 +5,7 @@
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2018-2024 Intel Corporation
* Copyright (C) 2018-2025 Intel Corporation
*
* Transmit and frame generation functions.
*/
@ -5016,12 +5016,25 @@ static void ieee80211_set_beacon_cntdwn(struct ieee80211_sub_if_data *sdata,
}
}
static u8 __ieee80211_beacon_update_cntdwn(struct beacon_data *beacon)
static u8 __ieee80211_beacon_update_cntdwn(struct ieee80211_link_data *link,
struct beacon_data *beacon)
{
beacon->cntdwn_current_counter--;
if (beacon->cntdwn_current_counter == 1) {
/*
* Channel switch handling is done by a worker thread while
* beacons get pulled from hardware timers. It's therefore
* possible that software threads are slow enough to not be
* able to complete CSA handling in a single beacon interval,
* in which case we get here. There isn't much to do about
* it, other than letting the user know that the AP isn't
* behaving correctly.
*/
link_err_once(link,
"beacon TX faster than countdown (channel/color switch) completion\n");
return 0;
}
/* the counter should never reach 0 */
WARN_ON_ONCE(!beacon->cntdwn_current_counter);
beacon->cntdwn_current_counter--;
return beacon->cntdwn_current_counter;
}
@ -5052,7 +5065,7 @@ u8 ieee80211_beacon_update_cntdwn(struct ieee80211_vif *vif, unsigned int link_i
if (!beacon)
goto unlock;
count = __ieee80211_beacon_update_cntdwn(beacon);
count = __ieee80211_beacon_update_cntdwn(link, beacon);
unlock:
rcu_read_unlock();
@ -5450,7 +5463,7 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw,
if (beacon->cntdwn_counter_offsets[0]) {
if (!is_template)
__ieee80211_beacon_update_cntdwn(beacon);
__ieee80211_beacon_update_cntdwn(link, beacon);
ieee80211_set_beacon_cntdwn(sdata, beacon, link);
}
@ -5482,7 +5495,7 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw,
* for now we leave it consistent with overall
* mac80211's behavior.
*/
__ieee80211_beacon_update_cntdwn(beacon);
__ieee80211_beacon_update_cntdwn(link, beacon);
ieee80211_set_beacon_cntdwn(sdata, beacon, link);
}