core/dp/htt/htt_rx_ll.c  +101 −41

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2021 The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
@@ -107,6 +107,22 @@ htt_get_first_packet_after_wow_wakeup(uint32_t *msg_word, qdf_nbuf_t buf)
 	}
 }
 
+/**
+ * htt_rx_ring_smmu_mapped() - check if rx ring is smmu mapped or not
+ * @pdev: HTT pdev handle
+ *
+ * Return: true or false.
+ */
+static inline bool htt_rx_ring_smmu_mapped(htt_pdev_handle pdev)
+{
+	if (qdf_mem_smmu_s1_enabled(pdev->osdev) &&
+	    pdev->is_ipa_uc_enabled &&
+	    pdev->rx_ring.smmu_map)
+		return true;
+	else
+		return false;
+}
+
 static inline qdf_nbuf_t htt_rx_netbuf_pop(htt_pdev_handle pdev)
 {
 	int idx;
@@ -375,14 +391,9 @@ static int htt_rx_ring_fill_n(struct htt_pdev_t *pdev, int num)
 	int filled = 0;
 	int debt_served = 0;
 	qdf_mem_info_t mem_map_table = {0};
-	bool ipa_smmu = false;
 
 	idx = *pdev->rx_ring.alloc_idx.vaddr;
 
-	if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->is_ipa_uc_enabled &&
-	    pdev->rx_ring.smmu_map)
-		ipa_smmu = true;
-
 	if ((idx < 0) || (idx > pdev->rx_ring.size_mask) ||
 	    (num > pdev->rx_ring.size)) {
 		QDF_TRACE(QDF_MODULE_ID_HTT,
@@ -476,10 +487,12 @@ static int htt_rx_ring_fill_n(struct htt_pdev_t *pdev, int num)
 		pdev->rx_ring.buf.netbufs_ring[idx] = rx_netbuf;
 	}
 
-	if (ipa_smmu) {
+	/* Caller already protected this function with refill_lock */
+	if (qdf_nbuf_is_rx_ipa_smmu_map(rx_netbuf)) {
 		qdf_update_mem_map_table(pdev->osdev, &mem_map_table,
 					 paddr, HTT_RX_BUF_SIZE);
-		cds_smmu_map_unmap(true, 1, &mem_map_table);
+		qdf_assert_always(
+			!cds_smmu_map_unmap(true, 1, &mem_map_table));
 	}
 
 	pdev->rx_ring.buf.paddrs_ring[idx] = paddr_marked;
@@ -1141,6 +1154,15 @@ htt_rx_hash_list_insert(struct htt_pdev_t *pdev,
 	RX_HASH_LOG(qdf_print("rx hash: paddr 0x%x netbuf %pK bucket %d\n",
 			      paddr, netbuf, (int)i));
 
+	if (htt_rx_ring_smmu_mapped(pdev)) {
+		if (qdf_unlikely(qdf_nbuf_is_rx_ipa_smmu_map(netbuf))) {
+			qdf_err("Already smmu mapped, nbuf: %pK",
+				netbuf);
+			qdf_assert_always(0);
+		}
+		qdf_nbuf_set_rx_ipa_smmu_map(netbuf, true);
+	}
+
 	HTT_RX_HASH_COUNT_INCR(pdev->rx_ring.hash_table[i]);
 	HTT_RX_HASH_COUNT_PRINT(pdev->rx_ring.hash_table[i]);
@@ -1203,6 +1225,13 @@ qdf_nbuf_t htt_rx_hash_list_lookup(struct htt_pdev_t *pdev,
 		}
 	}
 
+	if (netbuf && htt_rx_ring_smmu_mapped(pdev)) {
+		if (qdf_unlikely(!qdf_nbuf_is_rx_ipa_smmu_map(netbuf))) {
+			qdf_err("smmu not mapped nbuf: %pK", netbuf);
+			qdf_assert_always(0);
+		}
+	}
+
 	RX_HASH_LOG(qdf_print("rx hash: paddr 0x%llx, netbuf %pK, bucket %d\n",
 			      (unsigned long long)paddr, netbuf, (int)i));
 	HTT_RX_HASH_COUNT_PRINT(pdev->rx_ring.hash_table[i]);
@@ -1310,11 +1339,8 @@ static void htt_rx_hash_deinit(struct htt_pdev_t *pdev)
 	if (!pdev->rx_ring.hash_table)
 		return;
 
-	if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->is_ipa_uc_enabled &&
-	    pdev->rx_ring.smmu_map)
-		ipa_smmu = true;
-
 	qdf_spin_lock_bh(&pdev->rx_ring.rx_hash_lock);
+	ipa_smmu = htt_rx_ring_smmu_mapped(pdev);
 	hash_table = pdev->rx_ring.hash_table;
 	pdev->rx_ring.hash_table = NULL;
 	qdf_spin_unlock_bh(&pdev->rx_ring.rx_hash_lock);
@@ -1329,14 +1355,26 @@ static void htt_rx_hash_deinit(struct htt_pdev_t *pdev)
 				  listnode_offset);
 		if (hash_entry->netbuf) {
 			if (ipa_smmu) {
+				if (qdf_unlikely(
+					!qdf_nbuf_is_rx_ipa_smmu_map(
+							hash_entry->netbuf))) {
+					qdf_err("nbuf: %pK NOT mapped",
+						hash_entry->netbuf);
+					qdf_assert_always(0);
+				}
+				qdf_nbuf_set_rx_ipa_smmu_map(
+						hash_entry->netbuf, false);
 				qdf_update_mem_map_table(pdev->osdev,
 					&mem_map_table,
 					QDF_NBUF_CB_PADDR(
 						hash_entry->netbuf),
 					HTT_RX_BUF_SIZE);
-				cds_smmu_map_unmap(false, 1, &mem_map_table);
+				qdf_assert_always(
+					!cds_smmu_map_unmap(
+						false, 1, &mem_map_table));
 			}
 #ifdef DEBUG_DMA_DONE
 			qdf_nbuf_unmap(pdev->osdev, hash_entry->netbuf,
@@ -1448,7 +1486,6 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
 	qdf_dma_addr_t paddr;
 	qdf_mem_info_t mem_map_table = {0};
 	int ret = 1;
-	bool ipa_smmu = false;
 	struct htt_host_rx_desc_base *timestamp_rx_desc = NULL;
 
 	HTT_ASSERT1(htt_rx_in_order_ring_elems(pdev) != 0);
@@ -1466,10 +1503,6 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
 	msdu_count = HTT_RX_IN_ORD_PADDR_IND_MSDU_CNT_GET(*(msg_word + 1));
 	HTT_RX_CHECK_MSDU_COUNT(msdu_count);
 
-	if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->is_ipa_uc_enabled &&
-	    pdev->rx_ring.smmu_map)
-		ipa_smmu = true;
-
 	ol_rx_update_histogram_stats(msdu_count, frag_ind, offload_ind);
 	htt_rx_dbg_rxbuf_httrxind(pdev, msdu_count);
@@ -1495,11 +1528,24 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
 	}
 
 	while (msdu_count > 0) {
-		if (ipa_smmu) {
+		if (qdf_nbuf_is_rx_ipa_smmu_map(msdu)) {
+			/*
+			 * nbuf was already detached from hash_entry,
+			 * there is no parallel IPA context to access
+			 * this nbuf for smmu map/unmap, so updating
+			 * this flag here without lock.
+			 *
+			 * This flag was not updated in netbuf_pop context
+			 * htt_rx_hash_list_lookup (where lock held), to
+			 * differentiate whether this nbuf to be
+			 * smmu unmapped or it was never mapped so far.
+			 */
+			qdf_nbuf_set_rx_ipa_smmu_map(msdu, false);
 			qdf_update_mem_map_table(pdev->osdev, &mem_map_table,
 						 QDF_NBUF_CB_PADDR(msdu),
 						 HTT_RX_BUF_SIZE);
-			cds_smmu_map_unmap(false, 1, &mem_map_table);
+			qdf_assert_always(
+				!cds_smmu_map_unmap(false, 1, &mem_map_table));
 		}
 
 		/*
@@ -2217,14 +2263,13 @@ int htt_rx_attach(struct htt_pdev_t *pdev)
 void htt_rx_detach(struct htt_pdev_t *pdev)
 {
 	bool ipa_smmu = false;
+	qdf_nbuf_t nbuf;
 
 	qdf_timer_stop(&pdev->rx_ring.refill_retry_timer);
 	qdf_timer_free(&pdev->rx_ring.refill_retry_timer);
 	htt_rx_dbg_rxbuf_deinit(pdev);
 
-	if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->is_ipa_uc_enabled &&
-	    pdev->rx_ring.smmu_map)
-		ipa_smmu = true;
+	ipa_smmu = htt_rx_ring_smmu_mapped(pdev);
 
 	if (pdev->cfg.is_full_reorder_offload) {
 		qdf_mem_free_consistent(pdev->osdev, pdev->osdev->dev,
@@ -2241,29 +2286,31 @@ void htt_rx_detach(struct htt_pdev_t *pdev)
 		qdf_mem_info_t mem_map_table = {0};
 
 		while (sw_rd_idx != *pdev->rx_ring.alloc_idx.vaddr) {
+			nbuf = pdev->rx_ring.buf.netbufs_ring[sw_rd_idx];
 			if (ipa_smmu) {
+				if (qdf_unlikely(
+					!qdf_nbuf_is_rx_ipa_smmu_map(nbuf))) {
+					qdf_err("smmu not mapped, nbuf: %pK",
+						nbuf);
+					qdf_assert_always(0);
+				}
+				qdf_nbuf_set_rx_ipa_smmu_map(nbuf, false);
 				qdf_update_mem_map_table(pdev->osdev,
 					&mem_map_table,
-					QDF_NBUF_CB_PADDR(
-						pdev->rx_ring.buf.
-						netbufs_ring[sw_rd_idx]),
+					QDF_NBUF_CB_PADDR(nbuf),
 					HTT_RX_BUF_SIZE);
-				cds_smmu_map_unmap(false, 1, &mem_map_table);
+				qdf_assert_always(
+					!cds_smmu_map_unmap(false, 1,
+							    &mem_map_table));
 			}
 #ifdef DEBUG_DMA_DONE
-			qdf_nbuf_unmap(pdev->osdev,
-				       pdev->rx_ring.buf.
-				       netbufs_ring[sw_rd_idx],
+			qdf_nbuf_unmap(pdev->osdev, nbuf,
 				       QDF_DMA_BIDIRECTIONAL);
 #else
-			qdf_nbuf_unmap(pdev->osdev,
-				       pdev->rx_ring.buf.
-				       netbufs_ring[sw_rd_idx],
+			qdf_nbuf_unmap(pdev->osdev, nbuf,
 				       QDF_DMA_FROM_DEVICE);
 #endif
-			qdf_nbuf_free(pdev->rx_ring.buf.
-				      netbufs_ring[sw_rd_idx]);
+			qdf_nbuf_free(nbuf);
 			sw_rd_idx++;
 			sw_rd_idx &= pdev->rx_ring.size_mask;
 		}
@@ -2298,6 +2345,7 @@ static QDF_STATUS htt_rx_hash_smmu_map(bool map, struct htt_pdev_t *pdev)
 	struct htt_rx_hash_bucket **hash_table;
 	struct htt_list_node *list_iter = NULL;
 	qdf_mem_info_t mem_map_table = {0};
+	qdf_nbuf_t nbuf;
 	int ret;
 
 	qdf_spin_lock_bh(&pdev->rx_ring.rx_hash_lock);
@@ -2311,15 +2359,27 @@ static QDF_STATUS htt_rx_hash_smmu_map(bool map, struct htt_pdev_t *pdev)
 				(struct htt_rx_hash_entry *)((char *)list_iter -
 							     pdev->rx_ring.
 							     listnode_offset);
-			if (hash_entry->netbuf) {
+			nbuf = hash_entry->netbuf;
+			if (nbuf) {
+				if (qdf_unlikely(map ==
+					qdf_nbuf_is_rx_ipa_smmu_map(nbuf))) {
+					qdf_err("map/unmap err:%d, nbuf:%pK",
+						map, nbuf);
+					list_iter = list_iter->next;
+					continue;
+				}
+				qdf_nbuf_set_rx_ipa_smmu_map(nbuf, map);
 				qdf_update_mem_map_table(pdev->osdev,
 					&mem_map_table,
-					QDF_NBUF_CB_PADDR(
-						hash_entry->netbuf),
+					QDF_NBUF_CB_PADDR(nbuf),
 					HTT_RX_BUF_SIZE);
 				ret = cds_smmu_map_unmap(map, 1,
 							 &mem_map_table);
 				if (ret) {
+					qdf_nbuf_set_rx_ipa_smmu_map(nbuf,
+								     !map);
+					qdf_err("map: %d failure, nbuf: %pK",
+						map, nbuf);
 					qdf_spin_unlock_bh(
 						&pdev->rx_ring.rx_hash_lock);
 					return QDF_STATUS_E_FAILURE;
@@ -2329,6 +2389,7 @@ static QDF_STATUS htt_rx_hash_smmu_map(bool map, struct htt_pdev_t *pdev)
 		}
 	}
 
+	pdev->rx_ring.smmu_map = map;
 	qdf_spin_unlock_bh(&pdev->rx_ring.rx_hash_lock);
 
 	return QDF_STATUS_SUCCESS;
@@ -2345,7 +2406,6 @@ QDF_STATUS htt_rx_update_smmu_map(struct htt_pdev_t *pdev, bool map)
 		return QDF_STATUS_SUCCESS;
 
 	qdf_spin_lock_bh(&pdev->rx_ring.refill_lock);
-	pdev->rx_ring.smmu_map = map;
 	status = htt_rx_hash_smmu_map(map, pdev);
 	qdf_spin_unlock_bh(&pdev->rx_ring.refill_lock);
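The recurring pattern in this file is that each rx nbuf now carries its own SMMU map-state flag (qdf_nbuf_is_rx_ipa_smmu_map / qdf_nbuf_set_rx_ipa_smmu_map), so every map/unmap site checks the flag first, asserts on a double map or a missing map, flips the flag, and only then calls cds_smmu_map_unmap(). Below is a minimal standalone sketch of that idea; struct rx_buf, smmu_op() and the rx_buf_smmu_* helpers are hypothetical stand-ins for the qdf/cds APIs, not the driver's code:

/*
 * Sketch of per-buffer SMMU map-state tracking. The driver keeps the
 * flag in the nbuf control block and does the real IOMMU work in
 * cds_smmu_map_unmap(); here both are mocked.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct rx_buf {
	bool smmu_mapped;	/* mirrors the rx_ipa_smmu_map nbuf flag */
};

/* Stand-in for cds_smmu_map_unmap(); returns 0 on success. */
static int smmu_op(bool map, struct rx_buf *buf)
{
	(void)map;
	(void)buf;
	return 0;
}

/* Map path: a buffer must never be mapped twice, so assert on the
 * flag, flip it, then perform the mapping (mirrors the check added
 * in htt_rx_hash_list_insert()). */
static void rx_buf_smmu_map(struct rx_buf *buf)
{
	assert(!buf->smmu_mapped);
	buf->smmu_mapped = true;
	assert(!smmu_op(true, buf));
}

/* Unmap path: keyed on the per-buffer flag rather than a global
 * "ring is mapped" snapshot, so a buffer that was never mapped is
 * skipped (mirrors htt_rx_amsdu_rx_in_order_pop_ll()). */
static void rx_buf_smmu_unmap(struct rx_buf *buf)
{
	if (!buf->smmu_mapped)
		return;
	buf->smmu_mapped = false;
	assert(!smmu_op(false, buf));
}

int main(void)
{
	struct rx_buf buf = { .smmu_mapped = false };

	rx_buf_smmu_map(&buf);
	rx_buf_smmu_unmap(&buf);
	rx_buf_smmu_unmap(&buf);	/* never-mapped case: no-op */
	printf("per-buffer map state kept consistent\n");
	return 0;
}

The flag closes the window where pdev->rx_ring.smmu_map flips while buffers popped under the old state are still in flight: each buffer is unmapped if and only if it was actually mapped, which is also why pdev->rx_ring.smmu_map itself is now updated inside the rx_hash_lock in htt_rx_hash_smmu_map() instead of before it in htt_rx_update_smmu_map().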
core/hdd/src/wlan_hdd_cfg80211.h  +0 −2

@@ -73,8 +73,6 @@ struct hdd_context;
 #define VENDOR1_AP_OUI_TYPE "\x00\xE0\x4C"
 #define VENDOR1_AP_OUI_TYPE_SIZE 3
 
-#define WLAN_BSS_MEMBERSHIP_SELECTOR_VHT_PHY 126
-#define WLAN_BSS_MEMBERSHIP_SELECTOR_HT_PHY 127
 #define BASIC_RATE_MASK 0x80
 #define RATE_MASK 0x7f
core/hdd/src/wlan_hdd_hostapd.c  +45 −2

@@ -113,6 +113,22 @@
 #define MAX_SAP_NUM_CONCURRENCY_WITH_NAN 1
 #endif
 
+#ifndef BSS_MEMBERSHIP_SELECTOR_HT_PHY
+#define BSS_MEMBERSHIP_SELECTOR_HT_PHY 127
+#endif
+
+#ifndef BSS_MEMBERSHIP_SELECTOR_VHT_PHY
+#define BSS_MEMBERSHIP_SELECTOR_VHT_PHY 126
+#endif
+
+#ifndef BSS_MEMBERSHIP_SELECTOR_SAE_H2E
+#define BSS_MEMBERSHIP_SELECTOR_SAE_H2E 123
+#endif
+
+#ifndef BSS_MEMBERSHIP_SELECTOR_HE_PHY
+#define BSS_MEMBERSHIP_SELECTOR_HE_PHY 122
+#endif
+
 /*
  * 11B, 11G Rate table include Basic rate and Extended rate
  * The IDX field is the rate index
@@ -3964,15 +3980,36 @@ static void wlan_hdd_check_11gmode(const u8 *ie, u8 *require_ht,
 		}
 	} else {
 		if ((BASIC_RATE_MASK |
-		     WLAN_BSS_MEMBERSHIP_SELECTOR_HT_PHY) == ie[i])
+		     BSS_MEMBERSHIP_SELECTOR_HT_PHY) == ie[i])
 			*require_ht = true;
 		else if ((BASIC_RATE_MASK |
-			  WLAN_BSS_MEMBERSHIP_SELECTOR_VHT_PHY) == ie[i])
+			  BSS_MEMBERSHIP_SELECTOR_VHT_PHY) == ie[i])
 			*require_vht = true;
 		}
 	}
 }
 
+/**
+ * wlan_hdd_check_h2e() - check SAE/H2E require flag from support rate sets
+ * @rs: support rate or extended support rate set
+ * @require_h2e: pointer to store require h2e flag
+ *
+ * Return: none
+ */
+static void wlan_hdd_check_h2e(const tSirMacRateSet *rs, bool *require_h2e)
+{
+	uint8_t i;
+
+	if (!rs || !require_h2e)
+		return;
+
+	for (i = 0; i < rs->numRates; i++) {
+		if (rs->rate[i] == (BASIC_RATE_MASK |
+				    BSS_MEMBERSHIP_SELECTOR_SAE_H2E))
+			*require_h2e = true;
+	}
+}
+
 #ifdef WLAN_FEATURE_11AX
 /**
  * wlan_hdd_add_extn_ie() - add extension IE
@@ -5584,6 +5621,12 @@ int wlan_hdd_cfg80211_start_bss(struct hdd_adapter *adapter,
 					    config->extended_rates.rate,
 					    config->extended_rates.numRates);
 		}
+
+		config->require_h2e = false;
+		wlan_hdd_check_h2e(&config->supported_rates,
+				   &config->require_h2e);
+		wlan_hdd_check_h2e(&config->extended_rates,
+				   &config->require_h2e);
 	}
 
 	if (!cds_is_sub_20_mhz_enabled())
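wlan_hdd_check_h2e() keys off the BSS membership selector encoding: a (extended) supported-rates octet with the basic-rate bit (0x80) set and a value equal to the SAE hash-to-element selector (123) marks the BSS as H2E-required. A small self-contained sketch of the same walk; struct rate_set is a hypothetical flat stand-in for tSirMacRateSet:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BASIC_RATE_MASK                 0x80
#define BSS_MEMBERSHIP_SELECTOR_SAE_H2E 123

/* Hypothetical stand-in for tSirMacRateSet. */
struct rate_set {
	uint8_t num_rates;
	uint8_t rate[12];
};

/* Same walk wlan_hdd_check_h2e() performs: a rate octet equal to the
 * SAE H2E selector with the basic-rate bit set (0x80 | 123 = 0xFB)
 * means the BSS requires hash-to-element. */
static void check_h2e(const struct rate_set *rs, bool *require_h2e)
{
	uint8_t i;

	if (!rs || !require_h2e)
		return;
	for (i = 0; i < rs->num_rates; i++)
		if (rs->rate[i] == (BASIC_RATE_MASK |
				    BSS_MEMBERSHIP_SELECTOR_SAE_H2E))
			*require_h2e = true;
}

int main(void)
{
	/* 1, 2, 5.5, 11 Mbps basic rates plus the H2E selector */
	struct rate_set rs = { 5, { 0x82, 0x84, 0x8b, 0x96,
				    BASIC_RATE_MASK |
				    BSS_MEMBERSHIP_SELECTOR_SAE_H2E } };
	bool require_h2e = false;

	check_h2e(&rs, &require_h2e);
	printf("require_h2e = %d\n", require_h2e);	/* prints 1 */
	return 0;
}

Membership selectors reuse the rate-octet space at values no real rate occupies, which is why the same OR-with-0x80 pattern already served the HT (127) and VHT (126) required-PHY checks in wlan_hdd_check_11gmode().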
core/hdd/src/wlan_hdd_p2p.c  +9 −1

@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2012-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
@@ -450,6 +451,7 @@ int hdd_set_p2p_noa(struct net_device *dev, uint8_t *command)
 	hdd_debug("P2P_SET GO noa: count=%d interval=%d duration=%d",
 		  count, interval, duration);
 	duration = MS_TO_TU_MUS(duration);
+	interval = MS_TO_TU_MUS(interval);
 	/* PS Selection
 	 * Periodic noa (2)
 	 * Single NOA (4)
@@ -457,15 +459,21 @@ int hdd_set_p2p_noa(struct net_device *dev, uint8_t *command)
 	noa.opp_ps = 0;
 	noa.ct_window = 0;
 	if (count == 1) {
+		if (duration > interval)
+			duration = interval;
 		noa.duration = 0;
 		noa.single_noa_duration = duration;
 		noa.ps_selection = P2P_POWER_SAVE_TYPE_SINGLE_NOA;
 	} else {
+		if (count && (duration >= interval)) {
+			hdd_err("Duration should be less than interval");
+			return -EINVAL;
+		}
 		noa.duration = duration;
 		noa.single_noa_duration = 0;
 		noa.ps_selection = P2P_POWER_SAVE_TYPE_PERIODIC_NOA;
 	}
-	noa.interval = MS_TO_TU_MUS(interval);
+	noa.interval = interval;
 	noa.count = count;
 	noa.vdev_id = adapter->vdev_id;
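The hdd_set_p2p_noa() rework converts both duration and interval to the same unit up front, validates duration against interval in that unit, and then assigns the already-converted interval (previously only duration was converted early, and noa.interval was converted at assignment time, so the two were never compared at all). A standalone sketch of the reworked validation; the MS_TO_TU_MUS definition below (milliseconds treated as TUs, scaled to microseconds at 1024 µs per TU) is an assumption for illustration, not copied from the driver:

#include <stdint.h>
#include <stdio.h>

/* Assumed conversion: treat the user's millisecond value as TUs and
 * scale to microseconds (1 TU = 1024 us). */
#define MS_TO_TU_MUS(x) ((x) * 1024)

/* Hypothetical distillation of the reworked checks: convert both
 * values first so the comparison happens in one unit. */
static int validate_noa(uint32_t count, uint32_t *duration,
			uint32_t *interval)
{
	*duration = MS_TO_TU_MUS(*duration);
	*interval = MS_TO_TU_MUS(*interval);

	if (count == 1) {
		/* single NoA: clamp rather than reject */
		if (*duration > *interval)
			*duration = *interval;
		return 0;
	}
	/* periodic NoA: absence must leave the GO some airtime */
	if (count && *duration >= *interval) {
		fprintf(stderr, "Duration should be less than interval\n");
		return -1;
	}
	return 0;
}

int main(void)
{
	uint32_t duration = 50, interval = 100;

	if (!validate_noa(2, &duration, &interval))
		printf("periodic NoA ok: duration=%u interval=%u (us)\n",
		       duration, interval);
	return 0;
}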
core/mac/inc/qwlan_version.h  +2 −2

@@ -32,9 +32,9 @@
 #define QWLAN_VERSION_MAJOR 5
 #define QWLAN_VERSION_MINOR 2
 #define QWLAN_VERSION_PATCH 022
-#define QWLAN_VERSION_EXTRA "G"
+#define QWLAN_VERSION_EXTRA "L"
 #define QWLAN_VERSION_BUILD 10
 
-#define QWLAN_VERSIONSTR "5.2.022.10G"
+#define QWLAN_VERSIONSTR "5.2.022.10L"
 
 #endif /* QWLAN_VERSION_H */